Skip to content

Commit

Permalink
Merge pull request #2 from glinscott/next
Browse files Browse the repository at this point in the history
get latest from leela-chess
  • Loading branch information
ankan-ban authored Apr 30, 2018
2 parents d06a063 + 5a08857 commit 57e2b3c
Show file tree
Hide file tree
Showing 23 changed files with 1,334 additions and 939 deletions.
16 changes: 6 additions & 10 deletions lc0/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,9 @@ tensorflow_cc = declare_dependency(
deps = []
deps += tensorflow_cc
deps += cc.find_library('stdc++fs')
deps += cc.find_library('libcublas', dirs: '/opt/cuda/lib64/')
deps += cc.find_library('libcudnn', dirs: '/opt/cuda/lib64/')
deps += cc.find_library('libcudart', dirs: '/opt/cuda/lib64/')
deps += cc.find_library('libcublas', dirs: ['/opt/cuda/lib64/', '/usr/local/cuda/lib64/'])
deps += cc.find_library('libcudnn', dirs: ['/opt/cuda/lib64/', '/usr/local/cuda/lib64/'])
deps += cc.find_library('libcudart', dirs: ['/opt/cuda/lib64/', '/usr/local/cuda/lib64/'])
# deps += dependency('libprofiler')

nvcc = find_program('nvcc')
Expand All @@ -38,7 +38,7 @@ cuda_files = [

cuda_gen = generator(nvcc,
output: '@[email protected]',
arguments: ['-c', '@INPUT@', '-o', '@OUTPUT@', '-I', '../src'],
arguments: ['--std=c++14', '-c', '@INPUT@', '-o', '@OUTPUT@', '-I', '../src'],
)

files = [
Expand All @@ -47,6 +47,7 @@ files = [
'src/mcts/node.cc',
'src/mcts/search.cc',
'src/neural/cache.cc',
'src/neural/factory.cc',
'src/neural/loader.cc',
'src/neural/writer.cc',
'src/neural/network_mux.cc',
Expand Down Expand Up @@ -82,12 +83,7 @@ test('ChessBoard',
files, include_directories: includes, dependencies: test_deps
))

test('Network',
executable('network_test', 'src/neural/network_test.cc',
files, include_directories: includes, dependencies: test_deps
))

test('HashCat',
executable('hashcat_test', 'src/utils/hashcat_test.cc',
files, include_directories: includes, dependencies: test_deps
))
))
61 changes: 35 additions & 26 deletions lc0/src/engine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,20 +21,18 @@

#include "engine.h"
#include "mcts/search.h"
#include "neural/factory.h"
#include "neural/loader.h"
#include "neural/network_random.h"

#if CUDNN_EVAL == 1
#include "neural/network_cudnn.h"
#else
#include "neural/network_tf.h"
#endif

namespace lczero {
namespace {
const int kDefaultThreads = 2;
const char* kThreadsOption = "Number of worker threads";
const char* kDebugLogStr = "Do debug logging into file.";
const char* kDebugLogStr = "Do debug logging into file";

const char* kWeightsStr = "Network weights file path";
const char* kNnBackendStr = "NN backend to use";
const char* kNnBackendOptionsStr = "NN backend parameters";

const char* kAutoDiscover = "<autodiscover>";

Expand Down Expand Up @@ -65,35 +63,44 @@ EngineController::EngineController(BestMoveInfo::Callback best_move_callback,
void EngineController::PopulateOptions(OptionsParser* options) {
using namespace std::placeholders;

options->Add<StringOption>(
"Network weights file path", "weights", 'w',
std::bind(&EngineController::SetNetworkPath, this, _1)) = kAutoDiscover;
options->Add<SpinOption>(kThreadsOption, 1, 128, "threads", 't') =
options->Add<StringOption>(kWeightsStr, "weights", 'w') = kAutoDiscover;
options->Add<IntOption>(kThreadsOption, 1, 128, "threads", 't') =
kDefaultThreads;
options->Add<SpinOption>(
options->Add<IntOption>(
"NNCache size", 0, 999999999, "nncache", '\0',
std::bind(&EngineController::SetCacheSize, this, _1)) = 200000;

const auto backends = NetworkFactory::Get()->GetBackendsList();
options->Add<ChoiceOption>(kNnBackendStr, backends, "backend") = backends[0];
options->Add<StringOption>(kNnBackendOptionsStr, "backend-opts");

Search::PopulateUciParams(options);
}

void EngineController::SetNetworkPath(const std::string& path) {
void EngineController::UpdateNetwork() {
SharedLock lock(busy_mutex_);
std::string net_path;
if (path == kAutoDiscover) {
std::string network_path = options_.Get<std::string>(kWeightsStr);
std::string backend = options_.Get<std::string>(kNnBackendStr);
std::string backend_options = options_.Get<std::string>(kNnBackendOptionsStr);

if (network_path == network_path_ && backend == backend_ &&
backend_options == backend_options_)
return;

network_path_ = network_path;
backend_ = backend;
backend_options_ = backend_options;

std::string net_path = network_path;
if (net_path == kAutoDiscover) {
net_path = DiscoveryWeightsFile();
} else {
net_path = path;
}
Weights weights = LoadWeightsFromFile(net_path);

// TODO Make backend selection.
#if CUDNN_EVAL == 1
network_ = MakeCudnnNetwork(weights);
#else
network_ = MakeTensorflowNetwork(weights);
// network_ = MakeRandomNetwork();
#endif
OptionsDict network_options =
OptionsDict::FromString(backend_options, &options_);

network_ = NetworkFactory::Get()->Create(backend, weights, network_options);
}

void EngineController::SetCacheSize(int size) { cache_.SetCapacity(size); }
Expand All @@ -102,6 +109,7 @@ void EngineController::NewGame() {
SharedLock lock(busy_mutex_);
search_.reset();
tree_.reset();
UpdateNetwork();
}

void EngineController::SetPosition(const std::string& fen,
Expand All @@ -115,6 +123,7 @@ void EngineController::SetPosition(const std::string& fen,
std::vector<Move> moves;
for (const auto& move : moves_str) moves.emplace_back(move);
tree_->ResetToPosition(fen, moves);
UpdateNetwork();
}

void EngineController::Go(const GoParams& params) {
Expand Down Expand Up @@ -200,4 +209,4 @@ void EngineLoop::CmdGo(const GoParams& params) {

void EngineLoop::CmdStop() { engine_.Stop(); }

} // namespace lczero
} // namespace lczero
9 changes: 8 additions & 1 deletion lc0/src/engine.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,10 +59,11 @@ class EngineController {
void Go(const GoParams& params);
// Must not block.
void Stop();
void SetNetworkPath(const std::string& path);
void SetCacheSize(int size);

private:
void UpdateNetwork();

const OptionsDict& options_;

BestMoveInfo::Callback best_move_callback_;
Expand All @@ -78,6 +79,12 @@ class EngineController {
std::unique_ptr<NodePool> node_pool_;
std::unique_ptr<Search> search_;
std::unique_ptr<NodeTree> tree_;

// Store current network settings to track when they change so that they
// are reloaded.
std::string network_path_;
std::string backend_;
std::string backend_options_;
};

class EngineLoop : public UciLoop {
Expand Down
10 changes: 5 additions & 5 deletions lc0/src/main.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,26 +16,26 @@
along with Leela Chess. If not, see <http://www.gnu.org/licenses/>.
*/

#include <iostream>
#include "engine.h"
#include "selfplay/loop.h"
#include "utils/commandline.h"

int main(int argc, const char** argv) {
std::cerr << " _" << std::endl;
std::cerr << "| _ | |" << std::endl;
std::cerr << "|_ |_ |_| built " << __DATE__ << std::endl;
using namespace lczero;
CommandLine::Init(argc, argv);
CommandLine::RegisterMode("uci", "(default) Act as UCI engine");

#if CUDNN_EVAL != 1
// self-play not supported with cudnn version (I ran into compile issues)
CommandLine::RegisterMode("selfplay", "Play games with itself");

if (CommandLine::ConsumeCommand("selfplay")) {
// Selfplay mode.
SelfPlayLoop loop;
loop.RunLoop();
} else
#endif
{
} else {
// Consuming optional "uci" mode.
CommandLine::ConsumeCommand("uci");
// Ordinary UCI engine.
Expand Down
38 changes: 19 additions & 19 deletions lc0/src/mcts/search.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@

#include "mcts/node.h"
#include "neural/cache.h"
#include "neural/network_tf.h"
#include "utils/random.h"

namespace lczero {
Expand All @@ -35,23 +34,22 @@ namespace {
const char* kMiniBatchSizeStr = "Minibatch size for NN inference";
const char* kMiniPrefetchBatchStr = "Max prefetch nodes, per NN call";
const char* kAggresiveCachingStr = "Try hard to find what to cache";
const char* kCpuctStr = "Cpuct MCTS option (x100)";
const char* kTemperatureStr = "Initial temperature (x100)";
const char* kTempDecayStr = "Per move temperature decay (x100)";
const char* kCpuctStr = "Cpuct MCTS option";
const char* kTemperatureStr = "Initial temperature";
const char* kTempDecayStr = "Per move temperature decay";
const char* kNoiseStr = "Add Dirichlet noise at root node";
const char* kVerboseStatsStr = "Display verbose move stats";

} // namespace

void Search::PopulateUciParams(OptionsParser* options) {
options->Add<SpinOption>(kMiniBatchSizeStr, 1, 1024, "minibatch-size") = 16;
options->Add<SpinOption>(kMiniPrefetchBatchStr, 0, 1024, "max-prefetch") = 64;
options->Add<CheckOption>(kAggresiveCachingStr, "aggressive-caching") = false;
options->Add<SpinOption>(kCpuctStr, 0, 9999, "cpuct") = 170;
options->Add<SpinOption>(kTemperatureStr, 0, 9999, "temperature", 'm') = 0;
options->Add<SpinOption>(kTempDecayStr, 0, 100, "tempdecay") = 0;
options->Add<CheckOption>(kNoiseStr, "noise", 'n') = false;
options->Add<CheckOption>(kVerboseStatsStr, "verbose-move-stats") = false;
options->Add<IntOption>(kMiniBatchSizeStr, 1, 1024, "minibatch-size") = 128;
options->Add<IntOption>(kMiniPrefetchBatchStr, 0, 1024, "max-prefetch") = 32;
options->Add<BoolOption>(kAggresiveCachingStr, "aggressive-caching") = false;
options->Add<FloatOption>(kCpuctStr, 0, 100, "cpuct") = 1.7;
options->Add<FloatOption>(kTemperatureStr, 0, 100, "temperature", 'm') = 0.0;
options->Add<FloatOption>(kTempDecayStr, 0, 1.00, "tempdecay") = 0.0;
options->Add<BoolOption>(kNoiseStr, "noise", 'n') = false;
options->Add<BoolOption>(kVerboseStatsStr, "verbose-move-stats") = false;
}

Search::Search(Node* root_node, NodePool* node_pool, Network* network,
Expand All @@ -70,9 +68,9 @@ Search::Search(Node* root_node, NodePool* node_pool, Network* network,
kMiniBatchSize(options.Get<int>(kMiniBatchSizeStr)),
kMiniPrefetchBatch(options.Get<int>(kMiniPrefetchBatchStr)),
kAggresiveCaching(options.Get<bool>(kAggresiveCachingStr)),
kCpuct(options.Get<int>(kCpuctStr) / 100.0f),
kTemperature(options.Get<int>(kTemperatureStr) / 100.0f),
kTempDecay(options.Get<int>(kTempDecayStr) / 100.0f),
kCpuct(options.Get<float>(kCpuctStr)),
kTemperature(options.Get<float>(kTemperatureStr)),
kTempDecay(options.Get<float>(kTempDecayStr)),
kNoise(options.Get<bool>(kNoiseStr)),
kVerboseStats(options.Get<bool>(kVerboseStatsStr)) {}

Expand Down Expand Up @@ -287,7 +285,9 @@ int Search::PrefetchIntoCache(Node* node, int budget,
std::vector<ScoredNode> scores;
float factor = kCpuct * std::sqrt(std::max(node->n, 1u));
for (Node* iter = node->child; iter; iter = iter->sibling) {
scores.emplace_back(factor * iter->ComputeU() + iter->ComputeQ(), iter);
if (iter->p == 0.0f) continue;
// Flipping sign of a score to be able to easily sort.
scores.emplace_back(-factor * iter->ComputeU() - iter->ComputeQ(), iter);
}

int first_unsorted_index = 0;
Expand All @@ -311,7 +311,8 @@ int Search::PrefetchIntoCache(Node* node, int budget,
Node* n = scores[i].second;
// Last node gets the same budget as prev-to-last node.
if (i != scores.size() - 1) {
const float next_score = scores[i + 1].first;
// Sign of the score was flipped for sorting, flipping back.
const float next_score = -scores[i + 1].first;
const float q = n->ComputeQ();
if (next_score > q) {
budget_to_spend = std::min(
Expand Down Expand Up @@ -441,7 +442,6 @@ void Search::SendMovesStats() const {
void Search::MaybeTriggerStop() {
Mutex::Lock lock(counters_mutex_);
SharedMutex::Lock nodes_lock(nodes_mutex_);
if (stop_) return;
// Don't stop when the root node is not yet expanded.
if (total_playouts_ == 0) return;
// Stop if reached playouts limit.
Expand Down
59 changes: 59 additions & 0 deletions lc0/src/neural/factory.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
/*
This file is part of Leela Chess Zero.
Copyright (C) 2018 The LCZero Authors
Leela Chess is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Leela Chess is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Leela Chess. If not, see <http://www.gnu.org/licenses/>.
*/

#include "neural/factory.h"

#include <algorithm>
#include <iostream>

namespace lczero {

// Returns the process-wide NetworkFactory singleton. The instance is a
// function-local static, so construction is lazy and thread-safe (C++11
// magic statics); the pointer is never null.
NetworkFactory* NetworkFactory::Get() {
  static NetworkFactory instance;
  return &instance;
}

// Helper whose construction registers a backend with the global factory.
// Intended for use as a namespace-scope static in each backend's .cc file,
// so merely linking the file makes the backend available.
//   name     - backend identifier as shown in the "backend" UCI option.
//   factory  - callable creating the Network from weights + options.
//   priority - higher values sort earlier in GetBackendsList() (the first
//              entry becomes the default backend).
NetworkFactory::Register::Register(const std::string& name, FactoryFunc factory,
                                   int priority) {
  NetworkFactory::Get()->RegisterNetwork(name, factory, priority);
}  // NOTE: removed stray ';' after the definition (-Wextra-semi warning).

// Adds a backend entry and re-sorts the registry so that iteration order
// (and therefore GetBackendsList()/default-backend selection) follows the
// ordering defined by the entry's operator< (priority-based).
void NetworkFactory::RegisterNetwork(const std::string& name,
                                     FactoryFunc factory, int priority) {
  factories_.emplace_back(name, factory, priority);
  // Registration happens only at static-init time, so sorting on every
  // insert is cheap and keeps the list always ready for lookup.
  std::sort(std::begin(factories_), std::end(factories_));
}

// Returns the names of all registered backends, in registry order
// (highest priority first). The first name is used as the default
// value of the "backend" UCI option.
std::vector<std::string> NetworkFactory::GetBackendsList() const {
  std::vector<std::string> names;
  names.reserve(factories_.size());
  for (const auto& entry : factories_) names.push_back(entry.name);
  return names;
}

std::unique_ptr<Network> NetworkFactory::Create(const std::string& network,
const Weights& weights,
const OptionsDict& options) {
for (const auto& factory : factories_) {
if (factory.name == network) {
return factory.factory(weights, options);
}
}
throw Exception("Unknown backend: " + network);
}

} // namespace lczero
Loading

0 comments on commit 57e2b3c

Please sign in to comment.