Changelog for v0.17.0-rc1 #273

Merged: 13 commits, Aug 19, 2018
2 changes: 1 addition & 1 deletion changelog.txt
@@ -21,7 +21,7 @@ New visible features:

syzygy-paths=/path/to/syzygy/

-* In uci info, depth is not average depth rather than full depth
+* In uci info, "depth" is now average depth rather than full depth
(which was 4 all the time).
Also, depth values do include reused tree, only nodes visited during the
Member commented: Er uh this should say "do not include reused tree"

current search session.
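
For context on this entry: the UCI "depth" now reports the average depth over nodes visited in the current search session, and per the review comment above it should exclude nodes reused from the previous tree. A minimal sketch of that bookkeeping, with hypothetical names rather than lc0's actual classes:

#include <algorithm>
#include <cstdint>

// Hypothetical tracker; lc0's real bookkeeping lives elsewhere in Search.
class DepthStats {
 public:
  // Called once per node visited during the current search session only,
  // so positions reused from the previous tree never contribute.
  void OnNodeVisited(int depth) {
    cum_depth_ += depth;
    ++visits_;
    max_depth_ = std::max(max_depth_, depth);
  }
  // The "depth" shown in uci info: an average, not the full/max depth.
  int AverageDepth() const {
    return visits_ ? static_cast<int>(cum_depth_ / visits_) : 0;
  }
  // Still available for "seldepth"-style reporting.
  int MaxDepth() const { return max_depth_; }

 private:
  std::uint64_t cum_depth_ = 0;
  std::uint64_t visits_ = 0;
  int max_depth_ = 0;
};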
2 changes: 1 addition & 1 deletion src/engine.h
@@ -33,8 +33,8 @@
#include "neural/network.h"
#include "syzygy/syzygy.h"
#include "utils/mutex.h"
-#include "utils/optionsparser.h"
#include "utils/optional.h"
+#include "utils/optionsparser.h"

// CUDNN eval
// comment/disable this to enable tensor flow path
3 changes: 2 additions & 1 deletion src/main.cc
@@ -34,7 +34,8 @@
int main(int argc, const char** argv) {
std::cerr << " _" << std::endl;
std::cerr << "| _ | |" << std::endl;
-std::cerr << "|_ |_ |_| v" << GetVersionStr() << " built " << __DATE__ << std::endl;
+std::cerr << "|_ |_ |_| v" << GetVersionStr() << " built " << __DATE__
+<< std::endl;
using namespace lczero;
CommandLine::Init(argc, argv);
CommandLine::RegisterMode("uci", "(default) Act as UCI engine");
4 changes: 1 addition & 3 deletions src/mcts/node.cc
@@ -197,8 +197,7 @@ void Node::CreateEdges(const MoveList& moves) {
Node::ConstIterator Node::Edges() const { return {edges_, &child_}; }
Node::Iterator Node::Edges() { return {edges_, &child_}; }

-float Node::GetVisitedPolicy() const { return visited_policy_; }
-
+float Node::GetVisitedPolicy() const { return visited_policy_; }

Edge* Node::GetEdgeToNode(const Node* node) const {
assert(node->parent_ == this);
@@ -247,7 +246,6 @@ void Node::FinalizeScoreUpdate(float v) {
--n_in_flight_;
}


Node::NodeRange Node::ChildNodes() const { return child_.get(); }

void Node::ReleaseChildren() { gNodeGc.AddToGcQueue(std::move(child_)); }
12 changes: 6 additions & 6 deletions src/mcts/search.cc
@@ -387,8 +387,8 @@ std::pair<Move, Move> Search::GetBestMoveInternal() const
Move ponder_move; // Default is "null move" which means "don't display
// anything".
if (best_node.HasNode() && best_node.node()->HasChildren()) {
-ponder_move =
-GetBestChildNoTemperature(best_node.node()).GetMove(!played_history_.IsBlackToMove());
+ponder_move = GetBestChildNoTemperature(best_node.node())
+.GetMove(!played_history_.IsBlackToMove());
}
return {best_node.GetMove(played_history_.IsBlackToMove()), ponder_move};
}
@@ -705,12 +705,12 @@ void SearchWorker::ExtendNode(Node* node) {
node->MakeTerminal(GameResult::DRAW);
return;
}

// Neither by-position nor by-rule termination, but maybe it's a TB position.
if (search_->syzygy_tb_ && board.castlings().no_legal_castle() &&
history_.Last().GetNoCaptureNoPawnPly() == 0 &&
(board.ours() + board.theirs()).count() <=
-search_->syzygy_tb_->max_cardinality()) {
+search_->syzygy_tb_->max_cardinality()) {
ProbeState state;
WDLScore wdl = search_->syzygy_tb_->probe_wdl(history_.Last(), &state);
// Only fail state means the WDL is wrong, probe_wdl may produce correct
@@ -721,8 +721,8 @@ void SearchWorker::ExtendNode(Node* node) {
node->MakeTerminal(GameResult::BLACK_WON);
} else if (wdl == WDL_LOSS) {
node->MakeTerminal(GameResult::WHITE_WON);
-} else { // Cursed wins and blessed losses count as draws.
-node->MakeTerminal(GameResult::DRAW);
+} else { // Cursed wins and blessed losses count as draws.
+node->MakeTerminal(GameResult::DRAW);
}
search_->tb_hits_.fetch_add(1, std::memory_order_acq_rel);
return;
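The hunk above folds the five-way Syzygy WDL result into the three-way game result stored in the tree: cursed wins and blessed losses (wins and losses that the 50-move rule turns into draws) count as draws, and only true wins and losses stay decisive. A standalone illustration of that folding, with illustrative enum values rather than lc0's actual syzygy types:

// WDL probe result from the side-to-move's perspective (illustrative values).
enum WDLScore {
  WDL_LOSS = -2,          // unconditional loss
  WDL_BLESSED_LOSS = -1,  // lost position saved by the 50-move rule
  WDL_DRAW = 0,
  WDL_CURSED_WIN = 1,     // won position forfeited by the 50-move rule
  WDL_WIN = 2,
};

// Collapse to -1/0/+1. Only true wins and losses remain decisive; cursed
// wins and blessed losses count as draws, as in the diff above.
int CollapseWdl(WDLScore wdl) {
  if (wdl == WDL_WIN) return 1;
  if (wdl == WDL_LOSS) return -1;
  return 0;
}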
1 change: 0 additions & 1 deletion src/neural/blas/convolution1.cc
@@ -59,7 +59,6 @@ void Convolution1::Forward(const size_t batch_size, const size_t input_channels,
0.0f, // beta
batch_output, // C
kSquares); // ldc, leading dimension of C

}
}

2 changes: 1 addition & 1 deletion src/neural/blas/convolution1.h
@@ -38,4 +38,4 @@ class Convolution1 {
static constexpr auto kHeight = 8;
static constexpr auto kSquares = kWidth * kHeight;
};
-}
+} // namespace lczero
2 changes: 1 addition & 1 deletion src/neural/blas/fully_connected_layer.h
@@ -46,4 +46,4 @@ class FullyConnectedLayer {
static constexpr auto kSquares = kWidth * kHeight;
};

-} // lczero
+} // namespace lczero
2 changes: 1 addition & 1 deletion src/neural/blas/network_blas.cc
@@ -16,13 +16,13 @@
along with Leela Chess. If not, see <http://www.gnu.org/licenses/>.
*/

-#include "neural/network.h"
#include "neural/blas/batchnorm.h"
#include "neural/blas/blas.h"
#include "neural/blas/convolution1.h"
#include "neural/blas/fully_connected_layer.h"
#include "neural/blas/winograd_convolution3.h"
#include "neural/factory.h"
+#include "neural/network.h"

#include <algorithm>
#include <cassert>
2 changes: 1 addition & 1 deletion src/neural/blas/winograd_convolution3.h
@@ -83,4 +83,4 @@ class WinogradConvolution3 {
std::vector<float> V_;
std::vector<float> M_;
};
-}
+} // namespace lczero
34 changes: 14 additions & 20 deletions src/neural/blas/winograd_transform.ispc
@@ -22,18 +22,16 @@ uniform const size_t kWidth = 8;
uniform const size_t kHeight = 8;
uniform const size_t kSquares = kWidth * kHeight;

-uniform const size_t kWtiles = 4; //(kWidth + 1) / 2;
-uniform const size_t kTiles = kWtiles * kWtiles; // 16
+uniform const size_t kWtiles = 4; //(kWidth + 1) / 2;
+uniform const size_t kTiles = kWtiles * kWtiles; // 16

uniform const size_t kWinogradAlpha = 4;
uniform const size_t kWinogradTile = kWinogradAlpha * kWinogradAlpha;

export void winograd_TransformIn_ispc(uniform size_t batch_size,
const uniform float input[],
uniform size_t channels,
-uniform float output[])
-{
-
+uniform float output[]) {
float x[kWinogradAlpha][kWinogradAlpha];
float T1[kWinogradAlpha][kWinogradAlpha];

@@ -47,18 +45,16 @@ export void winograd_TransformIn_ispc(uniform size_t batch_size,
const uniform int yin = 2 * block_y - 1;
const uniform int xin = 2 * block_x - 1;

-foreach(channel = 0 ... channels) {
+foreach (channel = 0 ... channels) {
size_t V_channel = V_batch + channel;
size_t input_channel = input_batch + channel * (kWidth * kHeight);

for (uniform int i = 0; i < kWinogradAlpha; i++) {
for (uniform int j = 0; j < kWinogradAlpha; j++) {
-if ((yin + i) >= 0 && (xin + j) >= 0 &&
-(yin + i) < kHeight && (xin + j) < kWidth) {
-x[i][j] = input[input_channel +
-(yin + i) * kWidth + (xin + j)];
-}
-else {
+if ((yin + i) >= 0 && (xin + j) >= 0 && (yin + i) < kHeight &&
+(xin + j) < kWidth) {
+x[i][j] = input[input_channel + (yin + i) * kWidth + (xin + j)];
+} else {
x[i][j] = 0.0f;
}
}
@@ -82,8 +78,8 @@ export void winograd_TransformIn_ispc(uniform size_t batch_size,
T1[3][3] = x[1][3] - x[3][3];

const size_t V_incr = channels * kTiles * batch_size;
-const size_t wTile_V = V_channel +
-channels * (block_y * kWtiles + block_x);
+const size_t wTile_V =
+V_channel + channels * (block_y * kWtiles + block_x);

output[wTile_V + V_incr * 0] = T1[0][0] - T1[0][2];
output[wTile_V + V_incr * 1] = T1[0][1] + T1[0][2];
@@ -107,11 +103,10 @@ export void winograd_TransformIn_ispc(uniform size_t batch_size,
}
}


export void winograd_TransformOut_ispc(uniform size_t batch_size,
-const uniform float input[], uniform size_t channels,
-uniform float output[])
-{
+const uniform float input[],
+uniform size_t channels,
+uniform float output[]) {
float m[kWinogradTile];

for (uniform size_t batch_index = 0; batch_index < batch_size;
@@ -132,7 +127,7 @@ export void winograd_TransformOut_ispc(uniform size_t batch_size,
const uniform int M_incr = channels * kTiles * batch_size;

for (uniform int wTile = 0; wTile < kWinogradTile; wTile++) {
-m[wTile] = input[M_wtile + wTile*M_incr];
+m[wTile] = input[M_wtile + wTile * M_incr];
}

float o11 = m[0 * 4 + 0] + m[0 * 4 + 1] + m[0 * 4 + 2] +
@@ -160,4 +155,3 @@
}
}
}
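
The reflowed ispc above implements the input transform of Winograd F(2x2, 3x3): each 4x4 tile x becomes V = B^T x B, computed as the same four-tap add/subtract pattern applied first to rows (the T1 lines) and then to columns (the output lines). A scalar C++ sketch of one tile for reference; the ispc version runs this pattern vectorized across channels:

// Winograd F(2x2, 3x3) input transform for a single 4x4 tile: V = B^T x B.
// Matches the T1[...] and output[...] arithmetic visible in the diff above.
void TransformInTile(const float x[4][4], float V[4][4]) {
  float T1[4][4];
  for (int j = 0; j < 4; ++j) {  // T1 = B^T * x, applied per column
    T1[0][j] = x[0][j] - x[2][j];
    T1[1][j] = x[1][j] + x[2][j];
    T1[2][j] = x[2][j] - x[1][j];
    T1[3][j] = x[1][j] - x[3][j];
  }
  for (int i = 0; i < 4; ++i) {  // V = T1 * B, applied per row
    V[i][0] = T1[i][0] - T1[i][2];
    V[i][1] = T1[i][1] + T1[i][2];
    V[i][2] = T1[i][2] - T1[i][1];
    V[i][3] = T1[i][1] - T1[i][3];
  }
}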

6 changes: 3 additions & 3 deletions src/neural/loader.cc
@@ -42,7 +42,6 @@

namespace lczero {


namespace {
const std::uint32_t kWeightMagic = 0x1c0;

@@ -125,7 +124,8 @@ FloatVectors LoadFloatsFromPbFile(const std::string& buffer) {
net.min_version().patch());

if (net_ver > lc0_ver)
-throw Exception("Invalid weight file: lc0 version >= " + min_version + " required.");
+throw Exception("Invalid weight file: lc0 version >= " + min_version +
+" required.");

if (net.format().weights_encoding() != pblczero::Format::LINEAR16)
throw Exception("Invalid weight file: wrong encoding.");
@@ -258,7 +258,7 @@ std::string DiscoverWeightsFile() {

// First byte of the protobuf stream is 0x0d for fixed32, so we ignore it as
// our own magic should suffice.
-auto magic = reinterpret_cast<std::uint32_t*>(buf+1);
+auto magic = reinterpret_cast<std::uint32_t*>(buf + 1);
if (*magic == kWeightMagic) {
std::cerr << "Found pb network file: " << candidate.second << std::endl;
return candidate.second;
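The DiscoverWeightsFile() hunk sniffs candidate files by skipping the first byte of the protobuf stream (0x0d, a fixed32 field header) and comparing the next four bytes against kWeightMagic. A self-contained sketch of the same check; memcpy is used here to sidestep the alignment caveat of the reinterpret_cast in the diff:

#include <cstdint>
#include <cstring>

constexpr std::uint32_t kWeightMagic = 0x1c0;  // value from this diff

// Returns true if buf plausibly holds an lc0 protobuf weight file.
bool LooksLikeWeightFile(const char* buf, std::size_t size) {
  if (size < 1 + sizeof(std::uint32_t)) return false;
  std::uint32_t magic;
  std::memcpy(&magic, buf + 1, sizeof(magic));  // skip the leading 0x0d byte
  return magic == kWeightMagic;
}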
2 changes: 1 addition & 1 deletion src/neural/network_check.cc
@@ -25,8 +25,8 @@
Program grant you additional permission to convey the resulting work.
*/

-#include "neural/network.h"
#include "neural/factory.h"
+#include "neural/network.h"
#include "utils/histogram.h"
#include "utils/random.h"

2 changes: 1 addition & 1 deletion src/neural/network_mux.cc
@@ -208,5 +208,5 @@ void MuxingComputation::ComputeBlocking() {
} // namespace

REGISTER_NETWORK("multiplexing", MuxingNetwork, -1000)

} // namespace lczero
7 changes: 4 additions & 3 deletions src/neural/network_random.cc
@@ -35,12 +35,13 @@ namespace lczero {

class RandomNetworkComputation : public NetworkComputation {
public:
-RandomNetworkComputation(int delay, int seed) : delay_ms_(delay), seed_(seed) {}
+RandomNetworkComputation(int delay, int seed)
+: delay_ms_(delay), seed_(seed) {}
void AddInput(InputPlanes&& input) override {
std::uint64_t hash = seed_;
for (const auto& plane : input) {
hash = HashCat({hash, plane.mask});
-std::uint64_t value_hash =
+std::uint64_t value_hash =
*reinterpret_cast<const std::uint32_t*>(&plane.value);
hash = HashCat({hash, value_hash});
}
@@ -71,7 +72,7 @@ class RandomNetworkComputation : public NetworkComputation {
class RandomNetwork : public Network {
public:
RandomNetwork(const Weights& /*weights*/, const OptionsDict& options)
-: delay_ms_(options.GetOrDefault<int>("delay", 0)),
+: delay_ms_(options.GetOrDefault<int>("delay", 0)),
seed_(options.GetOrDefault<int>("seed", 0)) {}
std::unique_ptr<NetworkComputation> NewComputation() override {
return std::make_unique<RandomNetworkComputation>(delay_ms_, seed_);
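In the AddInput() hunk above, the random backend derives a deterministic pseudo-evaluation by folding each plane's mask and the bit pattern of its float value into a running 64-bit hash seeded from the "seed" option, so a given position always hashes the same way. A sketch of that folding; the mixer below is illustrative (in the spirit of boost::hash_combine), not lc0's actual HashCat:

#include <cstdint>
#include <cstring>

// Illustrative 64-bit hash combiner, not lc0's HashCat.
std::uint64_t HashCombine(std::uint64_t h, std::uint64_t v) {
  return h ^ (v + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2));
}

// Fold one input plane into the running hash, as AddInput() does:
// first the occupancy mask, then the value's bit pattern.
std::uint64_t FoldPlane(std::uint64_t hash, std::uint64_t mask, float value) {
  std::uint32_t value_bits;
  std::memcpy(&value_bits, &value, sizeof(value_bits));  // safe type-pun
  hash = HashCombine(hash, mask);
  return HashCombine(hash, value_bits);
}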
4 changes: 2 additions & 2 deletions src/neural/network_st_batch.h
@@ -69,8 +69,8 @@ class SingleThreadBatchingNetworkComputation : public NetworkComputation {

// Adds a sample to the parent batch.
void AddInput(InputPlanes&& input) override;
-// May not actually compute immediately. Instead computes when all computations
-// of the network called this.
+// May not actually compute immediately. Instead computes when all
+// computations of the network called this.
void ComputeBlocking() override;
// Returns how many times AddInput() was called.
int GetBatchSize() const override { return batch_size_; }
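The reworded comment describes the deferral contract of this single-threaded batching wrapper: each child computation queues inputs into a shared batch, and the expensive network call runs only once every outstanding computation has called ComputeBlocking(). A toy model of that counting scheme, with hypothetical names (no synchronization needed, since everything runs on one thread):

// Toy model: the parent network counts outstanding computations; the last
// caller of ComputeBlocking() triggers one batched evaluation for everyone.
class ToyBatchParent {
 public:
  void OnComputationCreated() { ++outstanding_; }
  // Returns true for the caller that actually ran the batch.
  bool OnComputeBlocking() {
    if (--outstanding_ > 0) return false;  // others still pending, defer
    RunBatch();
    return true;
  }

 private:
  void RunBatch() { /* one NN forward pass over all queued inputs */ }
  int outstanding_ = 0;
};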
1 change: 0 additions & 1 deletion src/neural/opencl/OpenCLParams.h
@@ -26,5 +26,4 @@ struct OpenCLParams {
bool force_tune = false;
bool tune_exhaustive = false;
int tune_batch_size = 1;

};
4 changes: 2 additions & 2 deletions src/neural/opencl/OpenCLTuner.cc
@@ -350,8 +350,8 @@ std::string Tuner::tune_sgemm(const int m, const int n, const int k,
}
}
if (best_time == 0) {
-std::cerr << "Failed to find a working configuration." << std::endl <<
-"Check your OpenCL drivers." << std::endl;
+std::cerr << "Failed to find a working configuration." << std::endl
+<< "Check your OpenCL drivers." << std::endl;
throw std::runtime_error("Tuner failed to find working configuration.");
}
return best_params;
13 changes: 6 additions & 7 deletions src/neural/opencl/network_opencl.cc
@@ -16,12 +16,12 @@
along with Leela Chess. If not, see <http://www.gnu.org/licenses/>.
*/

-#include "neural/network.h"
#include "neural/blas/batchnorm.h"
#include "neural/blas/blas.h"
#include "neural/blas/fully_connected_layer.h"
#include "neural/blas/winograd_convolution3.h"
#include "neural/factory.h"
+#include "neural/network.h"
#include "neural/opencl/OpenCL.h"
#include "neural/opencl/OpenCLParams.h"

@@ -105,9 +105,9 @@ class OpenCLComputation : public NetworkComputation {

// Now get the score.
auto winrate = FullyConnectedLayer::Forward0D(
-num_value_channels, weights_.ip2_val_w.data(),
-&output_val[j * num_value_channels]) +
-weights_.ip2_val_b[0];
+num_value_channels, weights_.ip2_val_w.data(),
+&output_val[j * num_value_channels]) +
+weights_.ip2_val_b[0];

q_values_.emplace_back(std::tanh(winrate));
}
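
For context on the reindented call above: Forward0D is a fully connected layer with a single output, which reduces to a dot product, and the value head maps that plus a bias through tanh into a win-rate estimate in (-1, 1). A scalar sketch under that reading, with signatures simplified from the real FullyConnectedLayer:

#include <cmath>
#include <cstddef>

// Dot product: a fully connected layer with one output and no activation.
float Forward0D(std::size_t size, const float* weights, const float* inputs) {
  float sum = 0.0f;
  for (std::size_t i = 0; i < size; ++i) sum += weights[i] * inputs[i];
  return sum;
}

// q = tanh(w . x + b), squashing the raw score into (-1, 1).
float ValueHeadQ(std::size_t size, const float* w, const float* x, float b) {
  return std::tanh(Forward0D(size, w, x) + b);
}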
Expand Down Expand Up @@ -162,7 +162,7 @@ class OpenCLNetwork : public Network {
params_.tune_only = options.GetOrDefault<bool>("tune_only", false);
params_.tune_exhaustive =
options.GetOrDefault<bool>("tune_exhaustive", false);

// By default batch size is 1, as many old cards may not support more.
auto max_batch_size_ =
static_cast<size_t>(options.GetOrDefault<int>("batch_size", 1));
@@ -178,7 +178,6 @@
// tune.
params_.tune_batch_size =
options.GetOrDefault<int>("tune_batch_size", max_batch_size_);


const auto inputChannels = static_cast<size_t>(kInputPlanes);
const auto channels = weights.input.biases.size();
@@ -196,7 +195,7 @@
// num_policy_input_planes = 32
// num_value_channels = 128
// num_output_policy = 1858

static constexpr auto kWinogradAlpha = 4;

opencl_.initialize(channels, params_);
2 changes: 1 addition & 1 deletion src/selfplay/game.cc
@@ -80,7 +80,7 @@ void SelfPlayGame::Play(int white_threads, int black_threads,
*tree_[idx], options_[idx].network, options_[idx].best_move_callback,
options_[idx].info_callback, options_[idx].search_limits,
*options_[idx].uci_options, options_[idx].cache, nullptr);
-// TODO: add Syzygy option for selfplay.
+// TODO: add Syzygy option for selfplay.
}

// Do search.
2 changes: 1 addition & 1 deletion src/selfplay/game.h
@@ -64,7 +64,7 @@ class SelfPlayGame {
static void PopulateUciParams(OptionsParser* options);

// Starts the game and blocks until the game is finished.
-void Play(int white_threads, int black_threads, bool enable_resign=true);
+void Play(int white_threads, int black_threads, bool enable_resign = true);
// Aborts the game currently played, doesn't matter if it's synchronous or
// not.
void Abort();
2 changes: 1 addition & 1 deletion src/selfplay/loop.cc
@@ -91,7 +91,7 @@ void SelfPlayLoop::SendGameInfo(const GameInfo& info) {
// and move list potentially contain spaces.
if (info.min_false_positive_threshold) {
std::string resign_res = "resign_report";
-resign_res +=
+resign_res +=
" fp_threshold " + std::to_string(*info.min_false_positive_threshold);
responses.push_back(resign_res);
}