From 833a2e2bc09e3640440766683043134d72bffd51 Mon Sep 17 00:00:00 2001 From: FauziAkram Date: Sat, 30 Dec 2023 15:22:17 +0300 Subject: [PATCH] Cleanup comments Tests used to derive some Elo worth comments: https://tests.stockfishchess.org/tests/view/656a7f4e136acbc573555a31 https://tests.stockfishchess.org/tests/view/6585fb455457644dc984620f closes https://github.com/official-stockfish/Stockfish/pull/4945 No functional change --- .github/ISSUE_TEMPLATE/config.yml | 2 +- .github/workflows/codeql.yml | 2 +- .github/workflows/stockfish_binaries.yml | 6 +++--- src/incbin/incbin.h | 10 +++++----- src/nnue/features/half_ka_v2_hm.h | 14 +++++++------- src/nnue/layers/affine_transform_sparse_input.h | 2 +- src/nnue/layers/sqr_clipped_relu.h | 2 +- src/nnue/nnue_common.h | 16 ++++++++-------- src/nnue/nnue_feature_transformer.h | 6 +++--- src/search.cpp | 8 ++++---- tests/instrumented.sh | 6 +++--- 11 files changed, 37 insertions(+), 37 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 1f8694d2e6f..0666eb32fb0 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -2,7 +2,7 @@ blank_issues_enabled: false contact_links: - name: Discord server url: https://discord.gg/GWDRS3kU6R - about: Feel free to ask for support or have a chat with us in our Discord server! + about: Feel free to ask for support or have a chat with us on our Discord server! - name: Discussions, Q&A, ideas, show us something... url: https://github.com/official-stockfish/Stockfish/discussions/new about: Do you have an idea for Stockfish? Do you want to show something that you made? Please open a discussion about it! 
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 054be90040c..d6da8a1c288 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,7 +23,7 @@ jobs: matrix: language: [ 'cpp' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'java' to analyze code written in Java, Kotlin, or both # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support diff --git a/.github/workflows/stockfish_binaries.yml b/.github/workflows/stockfish_binaries.yml index 5b3a522625c..eff2c2c9471 100644 --- a/.github/workflows/stockfish_binaries.yml +++ b/.github/workflows/stockfish_binaries.yml @@ -172,8 +172,8 @@ jobs: name: stockfish-${{ matrix.config.os }}-${{ matrix.binaries }} path: stockfish-${{ matrix.config.simple_name }}-${{ matrix.binaries }}.tar - # Artifacts automatically get zipped - # to avoid double zipping, we use the unzipped directory + # Artifacts automatically get zipped. 
+ # To avoid double-zipping, we use the unzipped directory - name: Upload binaries if: runner.os == 'Windows' uses: actions/upload-artifact@v3 @@ -195,7 +195,7 @@ jobs: id: commit_date run: echo "COMMIT_DATE=$(git show -s --date=format:'%Y%m%d' --format=%cd HEAD)" >> $GITHUB_ENV - # Make sure that an old ci which still runs on master doesn't recreate a prerelease + # Make sure that an old CI that still runs on master doesn't recreate a prerelease - name: Check Pullable Commits id: check_commits run: | diff --git a/src/incbin/incbin.h b/src/incbin/incbin.h index c19684d7242..18718b95fae 100644 --- a/src/incbin/incbin.h +++ b/src/incbin/incbin.h @@ -3,8 +3,8 @@ * @author Dale Weiler * @brief Utility for including binary files * - * Facilities for including binary files into the current translation unit and - * making use from them externally in other translation units. + * Facilities for including binary files into the current translation unit + * and making use of them externally in other translation units. */ #ifndef INCBIN_HDR #define INCBIN_HDR @@ -139,7 +139,7 @@ #endif #if defined(__APPLE__) -/* The directives are different for Apple branded compilers */ +/* The directives are different for Apple-branded compilers */ # define INCBIN_SECTION INCBIN_OUTPUT_SECTION "\n" # define INCBIN_GLOBAL(NAME) ".globl " INCBIN_MANGLE INCBIN_STRINGIZE(INCBIN_PREFIX) #NAME "\n" # define INCBIN_INT ".long " @@ -261,8 +261,8 @@ INCBIN_STRINGIZE( \ INCBIN_STYLE_IDENT(TYPE)) \ -/* Generate the global labels by indirectly invoking the macro with our style - * type and concatenating the name against them. */ +/* Generate the global labels by indirectly invoking the macro + * with our style type and concatenating the name against them. 
*/ #define INCBIN_GLOBAL_LABELS(NAME, TYPE) \ INCBIN_INVOKE( \ INCBIN_GLOBAL, \ diff --git a/src/nnue/features/half_ka_v2_hm.h b/src/nnue/features/half_ka_v2_hm.h index 540ff895a5a..c208e38dbad 100644 --- a/src/nnue/features/half_ka_v2_hm.h +++ b/src/nnue/features/half_ka_v2_hm.h @@ -34,11 +34,11 @@ class Position; namespace Stockfish::Eval::NNUE::Features { -// Feature HalfKAv2_hm: Combination of the position of own king -// and the position of pieces. Position mirrored such that king always on e..h files. +// Feature HalfKAv2_hm: Combination of the position of own king and the +// position of pieces. Position mirrored such that king is always on e..h files. class HalfKAv2_hm { - // unique number for each piece type on each square + // Unique number for each piece type on each square enum { PS_NONE = 0, PS_W_PAWN = 0, @@ -56,8 +56,8 @@ class HalfKAv2_hm { }; static constexpr IndexType PieceSquareIndex[COLOR_NB][PIECE_NB] = { - // convention: W - us, B - them - // viewed from other side, W and B are reversed + // Convention: W - us, B - them + // Viewed from other side, W and B are reversed {PS_NONE, PS_W_PAWN, PS_W_KNIGHT, PS_W_BISHOP, PS_W_ROOK, PS_W_QUEEN, PS_KING, PS_NONE, PS_NONE, PS_B_PAWN, PS_B_KNIGHT, PS_B_BISHOP, PS_B_ROOK, PS_B_QUEEN, PS_KING, PS_NONE}, {PS_NONE, PS_B_PAWN, PS_B_KNIGHT, PS_B_BISHOP, PS_B_ROOK, PS_B_QUEEN, PS_KING, PS_NONE, @@ -140,8 +140,8 @@ class HalfKAv2_hm { static int update_cost(const StateInfo* st); static int refresh_cost(const Position& pos); - // Returns whether the change stored in this StateInfo means that - // a full accumulator refresh is required. + // Returns whether the change stored in this StateInfo means + // that a full accumulator refresh is required. 
static bool requires_refresh(const StateInfo* st, Color perspective); }; diff --git a/src/nnue/layers/affine_transform_sparse_input.h b/src/nnue/layers/affine_transform_sparse_input.h index 6cb4d1a9347..70dbd790469 100644 --- a/src/nnue/layers/affine_transform_sparse_input.h +++ b/src/nnue/layers/affine_transform_sparse_input.h @@ -235,7 +235,7 @@ class AffineTransformSparseInput { const auto input32 = reinterpret_cast(input); - // Find indices of nonzero 32bit blocks + // Find indices of nonzero 32-bit blocks find_nnz(input32, nnz, count); const outvec_t* biasvec = reinterpret_cast(biases); diff --git a/src/nnue/layers/sqr_clipped_relu.h b/src/nnue/layers/sqr_clipped_relu.h index f8e2d497ac0..b9d8f030a24 100644 --- a/src/nnue/layers/sqr_clipped_relu.h +++ b/src/nnue/layers/sqr_clipped_relu.h @@ -91,7 +91,7 @@ class SqrClippedReLU { for (IndexType i = Start; i < InputDimensions; ++i) { output[i] = static_cast( - // Really should be /127 but we need to make it fast so we right shift + // Really should be /127 but we need to make it fast so we right-shift // by an extra 7 bits instead. Needs to be accounted for in the trainer. std::min(127ll, ((long long) (input[i]) * input[i]) >> (2 * WeightScaleBits + 7))); } diff --git a/src/nnue/nnue_common.h b/src/nnue/nnue_common.h index f9cd7fbb597..d4bd0028969 100644 --- a/src/nnue/nnue_common.h +++ b/src/nnue/nnue_common.h @@ -112,7 +112,7 @@ inline IntType read_little_endian(std::istream& stream) { // Utility to write an integer (signed or unsigned, any size) // to a stream in little-endian order. We swap the byte order before the write if -// necessary to always write in little endian order, independently of the byte +// necessary to always write in little-endian order, independently of the byte // ordering of the compiling machine. 
template inline void write_little_endian(std::ostream& stream, IntType value) { @@ -141,8 +141,8 @@ inline void write_little_endian(std::ostream& stream, IntType value) { } -// Read integers in bulk from a little indian stream. -// This reads N integers from stream s and put them in array out. +// Read integers in bulk from a little-endian stream. +// This reads N integers from stream s and puts them in array out. template inline void read_little_endian(std::istream& stream, IntType* out, std::size_t count) { if (IsLittleEndian) @@ -153,7 +153,7 @@ inline void read_little_endian(std::istream& stream, IntType* out, std::size_t c } -// Write integers in bulk to a little indian stream. +// Write integers in bulk to a little-endian stream. // This takes N integers from array values and writes them on stream s. template inline void write_little_endian(std::ostream& stream, const IntType* values, std::size_t count) { @@ -165,8 +165,8 @@ inline void write_little_endian(std::ostream& stream, const IntType* values, std } -// Read N signed integers from the stream s, putting them in -// the array out. The stream is assumed to be compressed using the signed LEB128 format. +// Read N signed integers from the stream s, putting them in the array out. +// The stream is assumed to be compressed using the signed LEB128 format. // See https://en.wikipedia.org/wiki/LEB128 for a description of the compression scheme. template inline void read_leb_128(std::istream& stream, IntType* out, std::size_t count) { @@ -216,8 +216,8 @@ inline void read_leb_128(std::istream& stream, IntType* out, std::size_t count) // Write signed integers to a stream with LEB128 compression. -// This takes N integers from array values, compress them with the LEB128 algorithm and -// writes the result on the stream s. +// This takes N integers from array values, compresses them with +// the LEB128 algorithm and writes the result on the stream s. 
// See https://en.wikipedia.org/wiki/LEB128 for a description of the compression scheme. template inline void write_leb_128(std::ostream& stream, const IntType* values, std::size_t count) { diff --git a/src/nnue/nnue_feature_transformer.h b/src/nnue/nnue_feature_transformer.h index 2af80f07792..a83a77c9d71 100644 --- a/src/nnue/nnue_feature_transformer.h +++ b/src/nnue/nnue_feature_transformer.h @@ -366,14 +366,14 @@ class FeatureTransformer { // The size must be enough to contain the largest possible update. // That might depend on the feature set and generally relies on the - // feature set's update cost calculation to be correct and never - // allow updates with more added/removed features than MaxActiveDimensions. + // feature set's update cost calculation to be correct and never allow + // updates with more added/removed features than MaxActiveDimensions. FeatureSet::IndexList removed[N - 1], added[N - 1]; { int i = N - - 2; // last potential state to update. Skip last element because it must be nullptr. + - 2; // Last potential state to update. Skip last element because it must be nullptr. 
while (states_to_update[i] == nullptr) --i; diff --git a/src/search.cpp b/src/search.cpp index 4e12a6c925a..eb63ec90762 100644 --- a/src/search.cpp +++ b/src/search.cpp @@ -747,7 +747,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo tte->save(posKey, VALUE_NONE, ss->ttPv, BOUND_NONE, DEPTH_NONE, MOVE_NONE, eval); } - // Use static evaluation difference to improve quiet move ordering (~4 Elo) + // Use static evaluation difference to improve quiet move ordering (~9 Elo) if (is_ok((ss - 1)->currentMove) && !(ss - 1)->inCheck && !priorCapture) { int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1652, 1546); @@ -1201,6 +1201,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo if (newDepth > d) value = -search(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode); + // Post LMR continuation history updates (~1 Elo) int bonus = value <= alpha ? -stat_malus(newDepth) : value >= beta ? stat_bonus(newDepth) : 0; @@ -1216,7 +1217,7 @@ Value search(Position& pos, Stack* ss, Value alpha, Value beta, Depth depth, boo if (!ttMove) r += 2; - // Note that if expected reduction is high, we reduce search depth by 1 here + // Note that if expected reduction is high, we reduce search depth by 1 here (~9 Elo) value = -search(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 3), !cutNode); } @@ -1644,8 +1645,7 @@ Value value_to_tt(Value v, int ply) { // from the transposition table (which refers to the plies to mate/be mated from // current position) to "plies to mate/be mated (TB win/loss) from the root". // However, to avoid potentially false mate or TB scores related to the 50 moves rule -// and the graph history interaction, we return highest non-TB score instead. - +// and the graph history interaction, we return the highest non-TB score instead. 
Value value_from_tt(Value v, int ply, int r50c) { if (v == VALUE_NONE) diff --git a/tests/instrumented.sh b/tests/instrumented.sh index 637d19f9d63..2a3eadc074e 100755 --- a/tests/instrumented.sh +++ b/tests/instrumented.sh @@ -1,5 +1,5 @@ #!/bin/bash -# check for errors under valgrind or sanitizers. +# check for errors under Valgrind or sanitizers. error() { @@ -151,7 +151,7 @@ cat << EOF > game.exp send "quit\n" expect eof - # return error code of the spawned program, useful for valgrind + # return error code of the spawned program, useful for Valgrind lassign [wait] pid spawnid os_error_flag value exit \$value EOF @@ -179,7 +179,7 @@ cat << EOF > syzygy.exp send "quit\n" expect eof - # return error code of the spawned program, useful for valgrind + # return error code of the spawned program, useful for Valgrind lassign [wait] pid spawnid os_error_flag value exit \$value EOF