From c09207cd497e250e8b3e7ad442cec3bc4181827e Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Mon, 23 Dec 2024 12:33:47 +0100
Subject: [PATCH 1/6] [test] Ensure that the first token generation is not included into TPOT (#1414)

CVS-155098
---
 src/cpp/src/perf_metrics.cpp            |  2 +-
 tests/python_tests/conftest.py          |  3 ++-
 tests/python_tests/test_generate_api.py | 10 +++++++++-
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp
index 3bd6252c78..3725dc0cfc 100644
--- a/src/cpp/src/perf_metrics.cpp
+++ b/src/cpp/src/perf_metrics.cpp
@@ -101,7 +101,7 @@ void PerfMetrics::evaluate_statistics(std::optional start_time) {
         auto ttft = tok_times[0] - start_time_val;
         raw_metrics.m_times_to_first_token = std::vector();
-        raw_metrics.m_times_to_first_token.emplace_back(ttft / batch_sizes[0]);
+        raw_metrics.m_times_to_first_token.emplace_back(ttft);
         num_generated_tokens = batch_sizes[0];

         // The very first infer request (prefill stage) is slower than subsequent ones since we process a sequence of tokens.
diff --git a/tests/python_tests/conftest.py b/tests/python_tests/conftest.py
index f98f47ecf3..e159045601 100644
--- a/tests/python_tests/conftest.py
+++ b/tests/python_tests/conftest.py
@@ -3,7 +3,8 @@

 def pytest_make_parametrize_id(config, val, argname):
     if argname in ['prompt', 'prompts', 'batched_prompts']:
-        return f'{val}'
+        # Print only the first 1000 characters of long prompts.
+        return f'{val[:1000]}'
     elif argname == 'model_descr':
         return f"{val[0]}"
     elif argname == 'chat_config':
diff --git a/tests/python_tests/test_generate_api.py b/tests/python_tests/test_generate_api.py
index d15747be63..9bb9eff49c 100644
--- a/tests/python_tests/test_generate_api.py
+++ b/tests/python_tests/test_generate_api.py
@@ -798,6 +798,12 @@ def test_perf_metrics(model_descr, generation_config, prompt):
     assert (mean_ttft, std_ttft) == (perf_metrics.get_ttft().mean, perf_metrics.get_ttft().std)
     assert mean_ttft > 0 and mean_ttft < 1000.0

+    raw_metrics = perf_metrics.raw_metrics
+    durations = np.array(raw_metrics.m_durations) / 1000
+    # Check that prefill is not included in the durations used for the TPOT calculation.
+    # For the very long prompt, prefill is slow and TTFT is much larger than any other token generation duration.
+ assert np.all(mean_ttft > durations * 2) + mean_tpot, std_tpot = perf_metrics.get_tpot() assert (mean_tpot, std_tpot) == (perf_metrics.get_tpot().mean, perf_metrics.get_tpot().std) assert mean_tpot > 0 and mean_ttft < 1000.0 @@ -822,7 +828,9 @@ def test_perf_metrics(model_descr, generation_config, prompt): assert std_detok_duration == 0 # assert that calculating statistics manually from the raw counters we get the same restults as from PerfMetrics - raw_metrics = perf_metrics.raw_metrics + assert np.allclose(mean_tpot, np.mean(durations)) + assert np.allclose(std_tpot, np.std(durations)) + raw_dur = np.array(raw_metrics.generate_durations) / 1000 assert np.allclose(mean_gen_duration, np.mean(raw_dur)) assert np.allclose(std_gen_duration, np.std(raw_dur)) From 3496d453ee2a2dd1a0340247076ab64787094446 Mon Sep 17 00:00:00 2001 From: Ekaterina Shiryaeva Date: Mon, 23 Dec 2024 12:48:23 +0100 Subject: [PATCH 2/6] Add perf metrics support for WhisperStaticPipeline (#1337) --- src/cpp/src/whisper/whisper.cpp | 37 ++----------- src/cpp/src/whisper/whisper_utils.cpp | 46 ++++++++++++++++ src/cpp/src/whisper/whisper_utils.hpp | 22 ++++++++ src/cpp/src/whisper_pipeline_static.cpp | 70 +++++++++++++++++++++---- 4 files changed, 131 insertions(+), 44 deletions(-) create mode 100644 src/cpp/src/whisper/whisper_utils.cpp create mode 100644 src/cpp/src/whisper/whisper_utils.hpp diff --git a/src/cpp/src/whisper/whisper.cpp b/src/cpp/src/whisper/whisper.cpp index 9d6aa698ce..04993f288c 100644 --- a/src/cpp/src/whisper/whisper.cpp +++ b/src/cpp/src/whisper/whisper.cpp @@ -18,6 +18,7 @@ #include "whisper_config.hpp" #include "whisper_feature_extractor.hpp" #include "whisper_models.hpp" +#include "whisper_utils.hpp" using ov::genai::MicroSeconds; @@ -79,17 +80,6 @@ void set_past_key_value(ov::InferRequest& source, ov::InferRequest& dest) { } } -void infer_with_perf_metrics(ov::InferRequest& request, ov::genai::RawPerfMetrics& raw_metrics) { - const auto infer_start = std::chrono::steady_clock::now(); - request.infer(); - const auto infer_end = std::chrono::steady_clock::now(); - const auto infer_ms = ov::genai::PerfMetrics::get_microsec(infer_end - infer_start); - raw_metrics.m_inference_durations[0] += MicroSeconds(infer_ms); - raw_metrics.m_token_infer_durations.emplace_back(infer_ms); - raw_metrics.m_new_token_times.emplace_back(infer_end); - raw_metrics.m_batch_sizes.emplace_back(1); -} - int64_t decode(ov::Tensor& encoder_hidden_state, ov::InferRequest& decoder, std::vector& input_ids, @@ -102,7 +92,7 @@ int64_t decode(ov::Tensor& encoder_hidden_state, ov::Tensor input_ids_tensor(ov::element::i64, {1, input_ids.size()}, input_ids.data()); decoder.set_tensor("input_ids", input_ids_tensor); - infer_with_perf_metrics(decoder, raw_metrics); + ov::genai::utils::infer_with_perf_metrics(decoder, raw_metrics); auto output_tensor = decoder.get_tensor("logits"); @@ -138,7 +128,7 @@ int64_t decode_with_past(ov::Tensor& encoder_hidden_state, cache_position_tensor.set_shape({1}); cache_position_tensor.data()[0] = cache_position; - infer_with_perf_metrics(decoder_with_past, raw_metrics); + ov::genai::utils::infer_with_perf_metrics(decoder_with_past, raw_metrics); auto output_tensor = decoder_with_past.get_tensor("logits"); @@ -265,25 +255,6 @@ std::pair> full_decode(ov::Tensor& encoder_hidden_sta return {false, output_tokens}; } -template -void filter_by_ranges(std::vector& value, size_t offset, std::vector>& ranges) { - OPENVINO_ASSERT(ranges.empty() || value.size() >= (offset + ranges.back().second)); - std::vector 
result{value.begin(), value.begin() + offset}; - for (auto [start, end] : ranges) { - result.insert(result.end(), value.begin() + offset + start, value.begin() + offset + end); - } - - value = result; -} - -void filter_non_segment_metrics(ov::genai::RawPerfMetrics& raw_metrics, - size_t offset, - std::vector>& ranges) { - filter_by_ranges(raw_metrics.m_token_infer_durations, offset, ranges); - filter_by_ranges(raw_metrics.m_new_token_times, offset, ranges); - filter_by_ranges(raw_metrics.m_batch_sizes, offset, ranges); -} - } // namespace namespace ov { @@ -362,7 +333,7 @@ WhisperGenerateResult whisper_generate(const ov::genai::WhisperGenerationConfig& feature_extractor.nb_max_frames, time_precision); - filter_non_segment_metrics(raw_metrics, output_tokens.size(), extracted_segments.segment_ranges); + ov::genai::utils::filter_non_segment_metrics(raw_metrics, output_tokens.size(), extracted_segments.segment_ranges); segments.insert(segments.end(), extracted_segments.segments.begin(), extracted_segments.segments.end()); diff --git a/src/cpp/src/whisper/whisper_utils.cpp b/src/cpp/src/whisper/whisper_utils.cpp new file mode 100644 index 0000000000..6e56a1439d --- /dev/null +++ b/src/cpp/src/whisper/whisper_utils.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "whisper_utils.hpp" + +namespace { + +template +void filter_by_ranges(std::vector& value, size_t offset, std::vector>& ranges) { + OPENVINO_ASSERT(ranges.empty() || value.size() >= (offset + ranges.back().second)); + std::vector result{value.begin(), value.begin() + offset}; + for (auto [start, end] : ranges) { + result.insert(result.end(), value.begin() + offset + start, value.begin() + offset + end); + } + + value = result; +} + +} // namespace + +namespace ov { +namespace genai { +namespace utils { + +void infer_with_perf_metrics(ov::InferRequest& request, ov::genai::RawPerfMetrics& raw_metrics) { + const auto infer_start = std::chrono::steady_clock::now(); + request.infer(); + const auto infer_end = std::chrono::steady_clock::now(); + const auto infer_ms = ov::genai::PerfMetrics::get_microsec(infer_end - infer_start); + raw_metrics.m_inference_durations[0] += MicroSeconds(infer_ms); + raw_metrics.m_token_infer_durations.emplace_back(infer_ms); + raw_metrics.m_new_token_times.emplace_back(infer_end); + raw_metrics.m_batch_sizes.emplace_back(1); +} + +void filter_non_segment_metrics(ov::genai::RawPerfMetrics& raw_metrics, + size_t offset, + std::vector>& ranges) { + filter_by_ranges(raw_metrics.m_token_infer_durations, offset, ranges); + filter_by_ranges(raw_metrics.m_new_token_times, offset, ranges); + filter_by_ranges(raw_metrics.m_batch_sizes, offset, ranges); +} + +} // namespace utils +} // namespace genai +} // namespace ov diff --git a/src/cpp/src/whisper/whisper_utils.hpp b/src/cpp/src/whisper/whisper_utils.hpp new file mode 100644 index 0000000000..234feed6a8 --- /dev/null +++ b/src/cpp/src/whisper/whisper_utils.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "openvino/genai/perf_metrics.hpp" + +namespace ov { +namespace genai { +namespace utils { + +void infer_with_perf_metrics(ov::InferRequest& request, ov::genai::RawPerfMetrics& raw_metrics); + +void filter_non_segment_metrics(ov::genai::RawPerfMetrics& raw_metrics, + size_t offset, + std::vector>& ranges); + +} // namespace utils +} // namespace genai +} // namespace ov diff --git 
a/src/cpp/src/whisper_pipeline_static.cpp b/src/cpp/src/whisper_pipeline_static.cpp index dc26789846..cc61eb0659 100644 --- a/src/cpp/src/whisper_pipeline_static.cpp +++ b/src/cpp/src/whisper_pipeline_static.cpp @@ -14,6 +14,7 @@ #include "whisper/timestamps.hpp" #include "whisper/whisper.hpp" #include "whisper/whisper_config.hpp" +#include "whisper/whisper_utils.hpp" #include "openvino/core/layout.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" @@ -26,6 +27,8 @@ #include "openvino/op/convert.hpp" #include "openvino/op/parameter.hpp" +using ov::genai::MicroSeconds; + namespace { template @@ -44,7 +47,8 @@ void copy_to_tensor(const std::vector& src_vec, ov::Tensor dst_tensor) { ov::Tensor encode(ov::InferRequest& request, std::vector& mel_data, const size_t feature_size, - const size_t nb_max_frames) { + const size_t nb_max_frames, + ov::genai::RawPerfMetrics& raw_metrics) { OPENVINO_ASSERT(mel_data.size() == feature_size * nb_max_frames, "Mel spectrogram required size: ", feature_size, @@ -54,7 +58,12 @@ ov::Tensor encode(ov::InferRequest& request, mel_data.size(), "."); copy_to_tensor(mel_data, request.get_tensor("input_features")); + + const auto infer_start = std::chrono::steady_clock::now(); request.infer(); + const auto infer_ms = ov::genai::PerfMetrics::get_microsec(std::chrono::steady_clock::now() - infer_start); + raw_metrics.m_inference_durations[0] += MicroSeconds(infer_ms); + return request.get_tensor("last_hidden_state"); } @@ -140,13 +149,14 @@ int64_t decode(ov::Tensor& encoder_hidden_state, ov::InferRequest& decoder, const std::vector& init_ids, const ov::genai::WhisperGenerationConfig& config, + ov::genai::RawPerfMetrics& raw_metrics, const bool apply_logit_processors = true, const bool return_timestamps = false) { // NB: Fill decoder inputs encoder_hidden_state.copy_to(decoder.get_tensor("encoder_hidden_states")); set_decoder_input_ids_attention_mask(decoder, init_ids, config.pad_token_id); - decoder.infer(); + ov::genai::utils::infer_with_perf_metrics(decoder, raw_metrics); auto output_tensor = decoder.get_tensor("logits"); @@ -167,6 +177,7 @@ int64_t decode_with_past(ov::InferRequest& decoder_with_past, const int64_t input_id, const int64_t position_id, const ov::genai::WhisperGenerationConfig& config, + ov::genai::RawPerfMetrics& raw_metrics, const bool return_timestamps, const std::vector& generated_tokens) { // FIXME: Avoid this cast to i32. Why it's not i64 precision in model? @@ -175,7 +186,7 @@ int64_t decode_with_past(ov::InferRequest& decoder_with_past, // FIXME: Is "attention_mask" supposed to be f16? 
decoder_with_past.get_tensor("attention_mask").data()[position_id - 1] = 0u; - decoder_with_past.infer(); + ov::genai::utils::infer_with_perf_metrics(decoder_with_past, raw_metrics); auto output_tensor = decoder_with_past.get_tensor("logits"); ov::genai::do_suppress_tokens(output_tensor, 0, config.suppress_tokens); @@ -217,13 +228,17 @@ void prepare_decoder_with_past(ov::InferRequest& decoder_with_past, ov::InferReq int64_t detect_language(ov::Tensor& encoder_hidden_state, ov::InferRequest decoder, - const ov::genai::WhisperGenerationConfig& config) { + const ov::genai::WhisperGenerationConfig& config, + ov::genai::RawPerfMetrics& raw_metrics) { decoder.set_tensor("encoder_hidden_states", ov::Tensor{encoder_hidden_state}); std::vector init_ids{static_cast(config.decoder_start_token_id)}; set_decoder_input_ids_attention_mask(decoder, init_ids, config.pad_token_id); + const auto infer_start = std::chrono::steady_clock::now(); decoder.infer(); + const auto infer_ms = ov::genai::PerfMetrics::get_microsec(std::chrono::steady_clock::now() - infer_start); + raw_metrics.m_inference_durations[0] += MicroSeconds(infer_ms); auto output_tensor = decoder.get_tensor("logits"); @@ -246,7 +261,8 @@ int64_t detect_language(ov::Tensor& encoder_hidden_state, std::vector prepare_init_ids(ov::Tensor& encoder_hidden_state, ov::InferRequest& decoder, const ov::genai::WhisperGenerationConfig& config, - const bool return_timestamps) { + const bool return_timestamps, + ov::genai::RawPerfMetrics& raw_metrics) { if (!config.is_multilingual) { if (return_timestamps) { return std::vector{static_cast(config.decoder_start_token_id)}; @@ -263,7 +279,7 @@ std::vector prepare_init_ids(ov::Tensor& encoder_hidden_state, language_token_id = static_cast(config.lang_to_id.at(language)); } } else { - language_token_id = detect_language(encoder_hidden_state, decoder, config); + language_token_id = detect_language(encoder_hidden_state, decoder, config, raw_metrics); } int32_t task_token_id = static_cast(config.transcribe_token_id); @@ -289,8 +305,9 @@ std::pair> full_decode(ov::Tensor& encoder_hidden_sta std::vector init_ids, const size_t max_new_tokens, const bool return_timestamps, + ov::genai::RawPerfMetrics& raw_metrics, const std::shared_ptr streamer) { - int64_t output_token = decode(encoder_hidden_state, models.decoder, init_ids, config, true, return_timestamps); + int64_t output_token = decode(encoder_hidden_state, models.decoder, init_ids, config, raw_metrics, true, return_timestamps); std::vector output_tokens{output_token}; if (!return_timestamps && streamer && streamer->put(output_token)) { @@ -308,6 +325,7 @@ std::pair> full_decode(ov::Tensor& encoder_hidden_sta output_tokens.back(), i + init_ids.size(), config, + raw_metrics, return_timestamps, output_tokens); update_past_key_value(models.decoder_with_past, models.decoder_with_past, i + init_ids.size()); @@ -576,6 +594,7 @@ WhisperDecodedResults WhisperPipeline::StaticWhisperPipeline::generate( const RawSpeechInput& raw_speech_input, OptionalWhisperGenerationConfig generation_config, ChunkStreamerVariant streamer) { + auto start_time = std::chrono::steady_clock::now(); WhisperGenerationConfig config = (generation_config.has_value()) ? 
*generation_config : m_generation_config; config.validate(); @@ -591,14 +610,25 @@ WhisperDecodedResults WhisperPipeline::StaticWhisperPipeline::generate( streamer_ptr = std::make_shared(m_tokenizer, *callback); } + size_t max_new_tokens = config.get_max_new_tokens(); + + WhisperPerfMetrics perf_metrics; + perf_metrics.num_input_tokens = 0; + RawPerfMetrics& raw_metrics = perf_metrics.raw_metrics; + raw_metrics.m_new_token_times.reserve(max_new_tokens); + raw_metrics.m_batch_sizes.reserve(max_new_tokens); + raw_metrics.m_token_infer_durations.reserve(max_new_tokens); + raw_metrics.m_inference_durations = {{MicroSeconds(0.0f)}}; + + const auto extract_start = std::chrono::steady_clock::now(); auto input_features = m_feature_extractor.extract(raw_speech_input); + const auto extract_ms = ov::genai::PerfMetrics::get_microsec(std::chrono::steady_clock::now() - extract_start); + perf_metrics.whisper_raw_metrics.features_extraction_durations.emplace_back(extract_ms); const bool is_shortform = input_features.n_frames <= m_feature_extractor.nb_max_frames; // long-form audio processing requires timestamps to be enabled const bool return_timestamps = config.return_timestamps || !is_shortform; - size_t max_new_tokens = config.get_max_new_tokens(); - std::vector init_ids; std::vector output_tokens; std::vector segments; @@ -619,11 +649,12 @@ WhisperDecodedResults WhisperPipeline::StaticWhisperPipeline::generate( ov::Tensor hidden_state_tensor = encode(m_models.encoder, input_features_chunk, m_feature_extractor.feature_size, - m_feature_extractor.nb_max_frames); + m_feature_extractor.nb_max_frames, + raw_metrics); // prepare init_ids just once for whole input if (init_ids.empty()) { - init_ids = prepare_init_ids(hidden_state_tensor, m_models.decoder, config, return_timestamps); + init_ids = prepare_init_ids(hidden_state_tensor, m_models.decoder, config, return_timestamps, raw_metrics); } auto [cancelled, chunk_output_tokens] = full_decode(hidden_state_tensor, @@ -632,6 +663,7 @@ WhisperDecodedResults WhisperPipeline::StaticWhisperPipeline::generate( init_ids, max_new_tokens - output_tokens.size(), return_timestamps, + raw_metrics, streamer_ptr); if (return_timestamps) { @@ -640,6 +672,8 @@ WhisperDecodedResults WhisperPipeline::StaticWhisperPipeline::generate( m_feature_extractor.nb_max_frames, time_precision); + ov::genai::utils::filter_non_segment_metrics(raw_metrics, output_tokens.size(), extracted_segments.segment_ranges); + segments.insert(segments.end(), extracted_segments.segments.begin(), extracted_segments.segments.end()); output_tokens.insert(output_tokens.end(), @@ -669,7 +703,11 @@ WhisperDecodedResults WhisperPipeline::StaticWhisperPipeline::generate( streamer_ptr->end(); } + auto decode_start_time = std::chrono::steady_clock::now(); WhisperDecodedResults result{std::vector{m_tokenizer.decode(output_tokens)}, std::vector{1.f}}; + result.perf_metrics = perf_metrics; + result.perf_metrics.raw_metrics.detokenization_durations.emplace_back( + PerfMetrics::get_microsec(std::chrono::steady_clock::now() - decode_start_time)); // if return_timestamps wasn't enabled by user if (!config.return_timestamps) { @@ -681,13 +719,23 @@ WhisperDecodedResults WhisperPipeline::StaticWhisperPipeline::generate( chunks.reserve(segments.size()); for (auto& segment : segments) { + decode_start_time = std::chrono::steady_clock::now(); chunks.push_back( WhisperDecodedResultChunk{segment.m_start, segment.m_end, m_tokenizer.decode(segment.m_tokens)}); + 
result.perf_metrics.raw_metrics.detokenization_durations.emplace_back( + PerfMetrics::get_microsec(std::chrono::steady_clock::now() - decode_start_time)); } result.chunks = chunks; } + auto& metrics = result.perf_metrics; + metrics.load_time = this->m_load_time_ms; + auto stop_time = std::chrono::steady_clock::now(); + metrics.raw_metrics.generate_durations.emplace_back(PerfMetrics::get_microsec(stop_time - start_time)); + metrics.raw_metrics.tokenization_durations.emplace_back(MicroSeconds(0.0f)); + metrics.evaluate_statistics(start_time); + return result; } From d5921487836103b7e9f32c8577021ac2a4d9d912 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 24 Dec 2024 13:30:44 +0400 Subject: [PATCH 3/6] [Inpainting] Update stable_diffusion_xl_pipeline.hpp (#1427) --- src/cpp/src/image_generation/stable_diffusion_xl_pipeline.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpp/src/image_generation/stable_diffusion_xl_pipeline.hpp b/src/cpp/src/image_generation/stable_diffusion_xl_pipeline.hpp index 15f15219c2..c3ebcdf1f4 100644 --- a/src/cpp/src/image_generation/stable_diffusion_xl_pipeline.hpp +++ b/src/cpp/src/image_generation/stable_diffusion_xl_pipeline.hpp @@ -320,7 +320,7 @@ class StableDiffusionXLPipeline : public StableDiffusionPipeline { } else if (m_pipeline_type == PipelineType::INPAINTING) { m_generation_config.guidance_scale = 7.5f; m_generation_config.num_inference_steps = 50; - m_generation_config.strength == 0.9999f; + m_generation_config.strength = 0.9999f; } } else { OPENVINO_THROW("Unsupported class_name '", class_name, "'. Please, contact OpenVINO GenAI developers"); From db28c8c5775fe61a03519355f433f9885460e9e3 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 24 Dec 2024 13:31:24 +0400 Subject: [PATCH 4/6] Fix compile warnings in tokenizer.cpp (#1428) ``` /Users/runner/work/openvino.genai/openvino.genai/src/cpp/src/tokenizer.cpp:238:40: warning: expression result unused [-Wunused-value] encode("non empty string").input_ids; ``` --- src/cpp/src/tokenizer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpp/src/tokenizer.cpp b/src/cpp/src/tokenizer.cpp index ed6fbc0a06..5364acfd91 100644 --- a/src/cpp/src/tokenizer.cpp +++ b/src/cpp/src/tokenizer.cpp @@ -235,7 +235,7 @@ class Tokenizer::TokenizerImpl { // Initialize tokenizer's cache to save time later. if (m_tokenizer) { // TODO CVS-150630: Empty strings sporadically can fail, therefore use nonempty string for warmup. 
- encode("non empty string").input_ids; + encode("non empty string"); } if (m_detokenizer) { decode({1, 33, 199, 42, 42}); From 0da48cd1fdb3dd9620b0a0f4d494d64d78d3a491 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Tue, 24 Dec 2024 21:34:34 +0400 Subject: [PATCH 5/6] Revert "Pin optimum-intel commit" (#1426) Reverts openvinotoolkit/openvino.genai#1420 Fixed here https://github.com/huggingface/optimum-intel/pull/1091 --- .github/workflows/llm_bench-python.yml | 4 ++-- samples/export-requirements.txt | 2 +- tests/python_tests/requirements.txt | 5 +++-- tools/llm_bench/requirements.txt | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/llm_bench-python.yml b/.github/workflows/llm_bench-python.yml index 8356805e19..1999bafcfe 100644 --- a/.github/workflows/llm_bench-python.yml +++ b/.github/workflows/llm_bench-python.yml @@ -151,7 +151,7 @@ jobs: rm -rf ./ov_models/internvl2-1B - name: WWB Tests run: | - pip install git+https://github.com/huggingface/optimum-intel.git@420fa87d039425a906b7f755e4562b65947f016a + pip install git+https://github.com/huggingface/optimum-intel.git GIT_CLONE_PROTECTION_ACTIVE=false PIP_PRE=1 PIP_EXTRA_INDEX_URL=https://storage.openvinotoolkit.org/simple/wheels/nightly pip install ${{ env.WWB_PATH }} python -m pytest -v ${{ env.WWB_PATH }}/tests stateful: @@ -190,7 +190,7 @@ jobs: - name: WWB Tests run: | pip install pytest - pip install git+https://github.com/huggingface/optimum-intel.git@420fa87d039425a906b7f755e4562b65947f016a + pip install git+https://github.com/huggingface/optimum-intel.git GIT_CLONE_PROTECTION_ACTIVE=false PIP_PRE=1 PIP_EXTRA_INDEX_URL=https://storage.openvinotoolkit.org/simple/wheels/nightly pip install ${{ env.WWB_PATH }} python -m pytest -v ${{ env.WWB_PATH }}/tests diff --git a/samples/export-requirements.txt b/samples/export-requirements.txt index d75fdbacee..797b680b9a 100644 --- a/samples/export-requirements.txt +++ b/samples/export-requirements.txt @@ -2,7 +2,7 @@ --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/pre-release --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly openvino-tokenizers~=2025.0.0.0.dev -optimum-intel @ git+https://github.com/huggingface/optimum-intel.git@420fa87d039425a906b7f755e4562b65947f016a +optimum-intel @ git+https://github.com/huggingface/optimum-intel.git numpy<2.0.0; sys_platform == 'darwin' einops==0.8.0 # For Qwen transformers_stream_generator==0.0.5 # For Qwen diff --git a/tests/python_tests/requirements.txt b/tests/python_tests/requirements.txt index bc5324b211..00bffb6646 100644 --- a/tests/python_tests/requirements.txt +++ b/tests/python_tests/requirements.txt @@ -1,6 +1,7 @@ --extra-index-url https://download.pytorch.org/whl/cpu -optimum-intel @ git+https://github.com/huggingface/optimum-intel.git@420fa87d039425a906b7f755e4562b65947f016a -numpy<2.0.0; sys_platform == 'darwin' +diffusers==0.31.0 +optimum-intel @ git+https://github.com/huggingface/optimum-intel.git +numpy<2.0.0; platform_system == "Darwin" and platform_machine == "x86_64" onnx==1.17.0 pytest diff --git a/tools/llm_bench/requirements.txt b/tools/llm_bench/requirements.txt index acbc668c52..f5f4a3fdeb 100644 --- a/tools/llm_bench/requirements.txt +++ b/tools/llm_bench/requirements.txt @@ -10,7 +10,7 @@ torch transformers>=4.40.0 diffusers>=0.22.0 #optimum is in dependency list of optimum-intel -git+https://github.com/huggingface/optimum-intel.git@420fa87d039425a906b7f755e4562b65947f016a#egg=optimum-intel 
+git+https://github.com/huggingface/optimum-intel.git@main#egg=optimum-intel git+https://github.com/openvinotoolkit/nncf.git@develop#egg=nncf packaging psutil From 021d88059d1367ef5ccc7938183de3dcdaafe82f Mon Sep 17 00:00:00 2001 From: Anastasiia Pnevskaia Date: Tue, 24 Dec 2024 19:43:58 +0100 Subject: [PATCH 6/6] Dynamic KV cache allocation (#1364) Dynamic KV cache allocation Ticket: CVS-158409 --------- Co-authored-by: Ilya Lavrenov --- .../prompt_lookup_decoding_lm.cpp | 6 +- .../speculative_decoding_lm.cpp | 6 +- .../prompt_lookup_decoding_lm.py | 5 +- .../speculative_decoding_lm.py | 6 +- src/cpp/src/block_manager.hpp | 51 ++++++- src/cpp/src/cache_manager.hpp | 124 +++++++++++++++--- src/cpp/src/continuous_batching_impl.cpp | 10 +- src/cpp/src/device_config.hpp | 36 ++--- src/cpp/src/llm_pipeline.cpp | 13 +- src/cpp/src/scheduler.hpp | 120 ++++++++++++++++- .../speculative_decoding_impl.cpp | 3 +- .../utils/paged_attention_transformations.cpp | 10 +- tests/cpp/cache_manager.cpp | 114 ++++++++++++++-- tests/cpp/scheduler.cpp | 59 ++++++--- tests/python_tests/common.py | 1 - tests/python_tests/ov_genai_test_utils.py | 1 - .../python_tests/test_cache_optimizations.py | 27 +++- 17 files changed, 480 insertions(+), 112 deletions(-) diff --git a/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp b/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp index e692110027..8b48dbade0 100644 --- a/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp +++ b/samples/cpp/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.cpp @@ -22,14 +22,10 @@ int main(int argc, char* argv[]) try { std::string device = "CPU"; - ov::genai::SchedulerConfig scheduler_config; - scheduler_config.cache_size = 5; - ov::genai::LLMPipeline pipe( model_path, device, - ov::genai::prompt_lookup(true), - ov::genai::scheduler_config(scheduler_config)); + ov::genai::prompt_lookup(true)); auto streamer = [](std::string subword) { std::cout << subword << std::flush; diff --git a/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp b/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp index 487296566b..e10228863f 100644 --- a/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp +++ b/samples/cpp/speculative_decoding_lm/speculative_decoding_lm.cpp @@ -26,14 +26,10 @@ int main(int argc, char* argv[]) try { // Please, set device for main model in `LLMPipeline` constructor and in in `ov::genai::draft_model` for draft. 
std::string main_device = "CPU", draft_device = "CPU"; - ov::genai::SchedulerConfig scheduler_config; - scheduler_config.cache_size = 5; - ov::genai::LLMPipeline pipe( main_model_path, main_device, - ov::genai::draft_model(draft_model_path, draft_device), - ov::genai::scheduler_config(scheduler_config)); + ov::genai::draft_model(draft_model_path, draft_device)); auto streamer = [](std::string subword) { std::cout << subword << std::flush; diff --git a/samples/python/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.py b/samples/python/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.py index 557897b6b1..726391ba9b 100755 --- a/samples/python/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.py +++ b/samples/python/prompt_lookup_decoding_lm/prompt_lookup_decoding_lm.py @@ -18,11 +18,8 @@ def main(): args = parser.parse_args() device = 'CPU' - scheduler_config = openvino_genai.SchedulerConfig() - # cache params - scheduler_config.cache_size = 2 - pipe = openvino_genai.LLMPipeline(args.model_dir, device, scheduler_config=scheduler_config, prompt_lookup=True) + pipe = openvino_genai.LLMPipeline(args.model_dir, device, prompt_lookup=True) config = openvino_genai.GenerationConfig() config.max_new_tokens = 100 diff --git a/samples/python/speculative_decoding_lm/speculative_decoding_lm.py b/samples/python/speculative_decoding_lm/speculative_decoding_lm.py index 612e59474e..217b8a2730 100755 --- a/samples/python/speculative_decoding_lm/speculative_decoding_lm.py +++ b/samples/python/speculative_decoding_lm/speculative_decoding_lm.py @@ -25,13 +25,9 @@ def main(): main_device = 'CPU' # GPU can be used as well draft_device = 'CPU' - scheduler_config = openvino_genai.SchedulerConfig() - # cache params - scheduler_config.cache_size = 2 - draft_model = openvino_genai.draft_model(args.draft_model_dir, draft_device) - pipe = openvino_genai.LLMPipeline(args.model_dir, main_device, scheduler_config=scheduler_config, draft_model=draft_model) + pipe = openvino_genai.LLMPipeline(args.model_dir, main_device, draft_model=draft_model) config = openvino_genai.GenerationConfig() config.max_new_tokens = 100 diff --git a/src/cpp/src/block_manager.hpp b/src/cpp/src/block_manager.hpp index dc82897dc8..4ca263777b 100644 --- a/src/cpp/src/block_manager.hpp +++ b/src/cpp/src/block_manager.hpp @@ -205,14 +205,20 @@ class BlockAllocator { * Blocks returned will be vectors with this size, each vector entry to be associated with a separate layer's KV cache. 
*/ BlockAllocator(size_t num_blocks, bool enable_prefix_caching, size_t num_layers = 1) : - m_free_blocks_num(num_layers, num_blocks), m_total_num_blocks(num_blocks), m_num_layers(num_layers), m_enable_prefix_caching(enable_prefix_caching), m_overwriteable_blocks(num_layers) { + m_total_num_blocks(num_blocks), m_num_layers(num_layers), m_enable_prefix_caching(enable_prefix_caching), m_overwriteable_blocks(num_layers) { OPENVINO_ASSERT(num_layers != 0, "num_layers must be non-zero"); m_free_blocks.resize(m_num_layers); - for (auto& per_layer_block_list : m_free_blocks) { - for (int block_id = 0; block_id < m_total_num_blocks; ++block_id) { - per_layer_block_list.push_back(std::make_shared(block_id)); + if (num_blocks > 0) { + m_free_blocks_num = std::vector(num_layers, num_blocks); + for (auto& per_layer_block_list : m_free_blocks) { + for (int block_id = 0; block_id < m_total_num_blocks; ++block_id) { + per_layer_block_list.push_back(std::make_shared(block_id)); + } } } + else { + m_free_blocks_num = std::vector(m_num_layers, 0); + } } ~BlockAllocator() { @@ -220,6 +226,21 @@ class BlockAllocator { // OPENVINO_ASSERT(m_total_num_blocks == m_free_blocks.size()); } + void increase_kv_blocks_number(size_t new_kv_blocks_count) { + OPENVINO_ASSERT(new_kv_blocks_count > m_total_num_blocks, "New blocks number should be more than previous blocks number."); + size_t added_blocks = new_kv_blocks_count - m_total_num_blocks; + for (auto idx = 0; idx < m_free_blocks_num.size(); idx++) { + m_free_blocks_num[idx] += added_blocks; + } + for (auto& per_layer_block_list : m_free_blocks) { + for (int block_id = m_total_num_blocks; block_id < new_kv_blocks_count; ++block_id) { + per_layer_block_list.push_back(std::make_shared(block_id)); + } + } + m_total_num_blocks = new_kv_blocks_count; + } + + /** * Returns the number of free blocks for a given layer. * @param layer_idx Index of the layer. @@ -459,6 +480,13 @@ class BlockAllocator { for (size_t layer_idx = 0; layer_idx < m_num_layers; layer_idx++) sum += num_free_blocks(layer_idx); return static_cast(m_num_layers * m_total_num_blocks - sum) / (m_num_layers * m_total_num_blocks) * 100; } + + /** + * @return The total number of KV blocks . + */ + size_t get_total_number_of_kv_blocks() const { + return m_total_num_blocks; + } }; /** @@ -713,6 +741,21 @@ class BlockManager { return m_allocator.get_used_percentage(); } + /** + * Increases the number of KV blocks. + * @param num_blocks The new number of KV-blocks. + */ + void increase_kv_blocks_number(size_t num_blocks) { + m_allocator.increase_kv_blocks_number(num_blocks); + } + + /** + * @return The total number of KV blocks . + */ + size_t get_total_number_of_kv_blocks() const { + return m_allocator.get_total_number_of_kv_blocks(); + } + /** * @brief Forks a sequence, establishing a new sequence from an existing one, reusing * currently allocated blocks of the existing sequence. 
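The block_manager.hpp changes above let the allocator start with zero KV blocks and grow on demand through increase_kv_blocks_number(), which appends new block ids to every layer's free list and bumps the per-layer free counters. Below is a minimal, self-contained sketch of that grow-only free-list pattern; it is an illustration only, not the pipeline's BlockAllocator, and all names here (ToyBlock, ToyBlockPool, grow_to) are invented for the example.

```cpp
// A toy per-layer block pool that starts empty and can only grow, mirroring
// the pattern of BlockAllocator::increase_kv_blocks_number() shown above.
#include <cassert>
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct ToyBlock {
    explicit ToyBlock(int id) : index(id) {}
    int index;
};

class ToyBlockPool {
    size_t m_total_blocks = 0;
    // One free list per decoder layer; each layer sees the same block ids.
    std::vector<std::vector<std::shared_ptr<ToyBlock>>> m_free_per_layer;

public:
    explicit ToyBlockPool(size_t num_layers) : m_free_per_layer(num_layers) {}

    // Grow the pool: every layer receives the newly created block ids
    // [m_total_blocks, new_total). Existing ids stay valid.
    void grow_to(size_t new_total) {
        assert(new_total > m_total_blocks && "the pool can only grow");
        for (auto& layer_free_list : m_free_per_layer) {
            for (size_t id = m_total_blocks; id < new_total; ++id) {
                layer_free_list.push_back(std::make_shared<ToyBlock>(static_cast<int>(id)));
            }
        }
        m_total_blocks = new_total;
    }

    size_t total_blocks() const { return m_total_blocks; }
    size_t free_blocks(size_t layer) const { return m_free_per_layer.at(layer).size(); }
};

int main() {
    ToyBlockPool pool(/*num_layers=*/2);  // starts with zero blocks, like num_kv_blocks == 0
    pool.grow_to(100);                    // initial allocation once the first requests arrive
    pool.grow_to(200);                    // later growth, e.g. by a growth factor of 2
    std::cout << pool.total_blocks() << " total, "
              << pool.free_blocks(0) << " free in layer 0\n";
    return 0;
}
```

Growing without ever shrinking keeps previously handed-out block indices valid, which is why the CacheManager changes later in this patch copy the old cache contents into the newly allocated, larger tensors instead of reallocating from scratch.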
diff --git a/src/cpp/src/cache_manager.hpp b/src/cpp/src/cache_manager.hpp index a7444555ab..0c04823f4f 100644 --- a/src/cpp/src/cache_manager.hpp +++ b/src/cpp/src/cache_manager.hpp @@ -15,38 +15,118 @@ class CacheManager { DeviceConfig m_device_config; std::vector m_key_cache; std::vector m_value_cache; + size_t m_num_allocated_kv_blocks = 0; ov::Core m_core; + ov::InferRequest m_request; + + ov::Shape set_first_dim_and_make_static(const ov::PartialShape& shape, size_t dim) { + ov::PartialShape res_shape = shape; + res_shape[0] = dim; + OPENVINO_ASSERT(res_shape.is_static()); + return res_shape.to_shape(); + } + + void update_request_tensor(size_t decoder_layer_id) { + m_request.set_tensor(std::string("key_cache.") + std::to_string(decoder_layer_id), m_key_cache[decoder_layer_id]); + m_request.set_tensor(std::string("value_cache.") + std::to_string(decoder_layer_id), m_value_cache[decoder_layer_id]); + } public: - explicit CacheManager(const DeviceConfig &device_config, ov::Core core) : + explicit CacheManager(const DeviceConfig &device_config, ov::InferRequest request, ov::Core core) : m_device_config(device_config), + m_request(request), m_core(core) { m_key_cache.reserve(m_device_config.get_num_layers()); m_value_cache.reserve(m_device_config.get_num_layers()); + } + + void allocate_cache_if_needed(size_t num_kv_blocks) { + if (m_num_allocated_kv_blocks >= num_kv_blocks) { + return; + } + OPENVINO_ASSERT(m_key_cache.size() == m_value_cache.size()); + m_num_allocated_kv_blocks = num_kv_blocks; + ov::Shape value_cache_shape = set_first_dim_and_make_static(m_device_config.get_value_cache_shape(), num_kv_blocks); + ov::Shape key_cache_shape = set_first_dim_and_make_static(m_device_config.get_key_cache_shape(), num_kv_blocks); + + const std::string device_name = m_device_config.get_device(); + + ov::Coordinate start_key{0,0,0,0}; + ov::Coordinate start_value{0,0,0,0}; - const std::string device_name = device_config.get_device(); if (device_name.find("GPU") == std::string::npos) {// Allocate KV caches for (size_t decoder_layer_id = 0; decoder_layer_id < m_device_config.get_num_layers(); ++decoder_layer_id) { - ov::Tensor key_cache(device_config.get_cache_precision(), device_config.get_key_cache_shape()); - ov::Tensor value_cache(device_config.get_cache_precision(), device_config.get_value_cache_shape()); + ov::Tensor key_cache(m_device_config.get_cache_precision(), key_cache_shape); + ov::Tensor value_cache(m_device_config.get_cache_precision(), value_cache_shape); + + auto key_cache_roi_end = static_cast(key_cache.data()); + auto value_cache_roi_end = static_cast(value_cache.data()); + size_t key_roi_size_byte = 0; + size_t value_roi_size_byte = 0; + + if (m_key_cache.size() > decoder_layer_id) { + ov::Coordinate end_key = m_key_cache[decoder_layer_id].get_shape(); + ov::Coordinate end_value = m_value_cache[decoder_layer_id].get_shape(); + + key_roi_size_byte = m_key_cache[decoder_layer_id].get_byte_size(); + value_roi_size_byte = m_value_cache[decoder_layer_id].get_byte_size(); + key_cache_roi_end = static_cast(key_cache.data()) + key_roi_size_byte; + value_cache_roi_end = static_cast(value_cache.data()) + value_roi_size_byte; + + // copy current cache data + ov::Tensor dst_key_roi(key_cache, start_key, end_key); + ov::Tensor dst_value_roi(value_cache, start_value, end_value); + + m_key_cache[decoder_layer_id].copy_to(dst_key_roi); + m_value_cache[decoder_layer_id].copy_to(dst_value_roi); + + } - // force allocation - std::memset(key_cache.data(), 0, key_cache.get_byte_size()); - 
std::memset(value_cache.data(), 0, value_cache.get_byte_size()); + // Some optimizations like AVX2, AVX512, AMX require a minimal shape and + // perform multiplying by zero on the excess data. Uninitialized tensor data contain NAN's, + // so NAN * 0 returns non-zero invalid data. + // So we need to set zeros to all newly allocated tensors data. + std::memset(key_cache_roi_end, 0, key_cache.get_byte_size() - key_roi_size_byte); + std::memset(value_cache_roi_end, 0, value_cache.get_byte_size() - value_roi_size_byte); + + // set new cache tensors + if (m_key_cache.size() > decoder_layer_id) { + m_key_cache[decoder_layer_id] = key_cache; + m_value_cache[decoder_layer_id] = value_cache; + } + else { + m_key_cache.emplace_back(key_cache); + m_value_cache.emplace_back(value_cache); + } - m_key_cache.emplace_back(key_cache); - m_value_cache.emplace_back(value_cache); + update_request_tensor(decoder_layer_id); } } else { auto remote_context = m_core.get_default_context(device_name); for (size_t decoder_layer_id = 0; decoder_layer_id < m_device_config.get_num_layers(); ++decoder_layer_id) { - ov::Tensor key_cache = remote_context.create_tensor(device_config.get_cache_precision(), - device_config.get_key_cache_shape()); - ov::Tensor value_cache = remote_context.create_tensor(device_config.get_cache_precision(), - device_config.get_value_cache_shape()); - - m_key_cache.emplace_back(key_cache); - m_value_cache.emplace_back(value_cache); + ov::Tensor key_cache = remote_context.create_tensor(m_device_config.get_cache_precision(), + key_cache_shape); + ov::Tensor value_cache = remote_context.create_tensor(m_device_config.get_cache_precision(), + value_cache_shape); + + if (m_key_cache.size() > decoder_layer_id) { + ov::Coordinate end_key = m_key_cache[decoder_layer_id].get_shape(); + ov::Coordinate end_value = m_value_cache[decoder_layer_id].get_shape(); + + // copy current cache data + ov::RemoteTensor dst_key_roi(key_cache, start_key, end_key); + ov::RemoteTensor dst_value_roi(value_cache, start_value, end_value); + dst_key_roi.copy_from(m_key_cache[decoder_layer_id]); + dst_value_roi.copy_from(m_value_cache[decoder_layer_id]); + + m_key_cache[decoder_layer_id] = key_cache; + m_value_cache[decoder_layer_id] = value_cache; + } + else { + m_key_cache.emplace_back(key_cache); + m_value_cache.emplace_back(value_cache); + } + update_request_tensor(decoder_layer_id); } } } @@ -62,8 +142,8 @@ class CacheManager { } void copy_blocks(const std::map>& block_copy_map) { - ov::Shape key_shape = m_device_config.get_key_cache_shape(); - ov::Shape value_shape = m_device_config.get_value_cache_shape(); + ov::Shape key_shape = set_first_dim_and_make_static(m_device_config.get_key_cache_shape(), m_num_allocated_kv_blocks); + ov::Shape value_shape = set_first_dim_and_make_static(m_device_config.get_value_cache_shape(), m_num_allocated_kv_blocks); ov::Coordinate key_src_start_roi(key_shape.size(), 0); ov::Coordinate key_src_end_roi = key_shape; @@ -98,5 +178,13 @@ class CacheManager { } } } + + std::shared_ptr get_core() { + return std::make_shared(m_core); + } + + std::shared_ptr get_device_config() { + return std::make_shared(m_device_config); + } }; } diff --git a/src/cpp/src/continuous_batching_impl.cpp b/src/cpp/src/continuous_batching_impl.cpp index e1ffd062de..52ec6a8302 100644 --- a/src/cpp/src/continuous_batching_impl.cpp +++ b/src/cpp/src/continuous_batching_impl.cpp @@ -53,11 +53,7 @@ void ContinuousBatchingPipeline::ContinuousBatchingImpl::init( ov::InferRequest infer_request = 
compiled_model.create_infer_request(); // setup KV caches - m_cache_manager = std::make_shared(device_config, core); - for (size_t decoder_layer_id = 0; decoder_layer_id < device_config.get_num_layers(); ++decoder_layer_id) { - infer_request.set_tensor(std::string("key_cache.") + std::to_string(decoder_layer_id), m_cache_manager->get_key_cache(decoder_layer_id)); - infer_request.set_tensor(std::string("value_cache.") + std::to_string(decoder_layer_id), m_cache_manager->get_value_cache(decoder_layer_id)); - } + m_cache_manager = std::make_shared(device_config, infer_request, core); SchedulerConfig updated_config = scheduler_config; // update KV blocks number in scheduler config @@ -71,8 +67,7 @@ void ContinuousBatchingPipeline::ContinuousBatchingImpl::init( // as it may lead to performance slowdown can_use_partial_preemption = false; } - - m_scheduler = std::make_shared(device_config.get_block_size(), updated_config, device_config.get_num_layers(), can_use_partial_preemption); + m_scheduler = std::make_shared(device_config.get_block_size(), m_cache_manager, updated_config, device_config.get_num_layers(), can_use_partial_preemption); // and finally create model runner bool is_use_cache_eviction = m_scheduler->get_config().use_cache_eviction; m_model_runner = std::make_shared(infer_request, m_scheduler->get_block_size(), device_config.get_num_layers(), is_use_cache_eviction); @@ -133,7 +128,6 @@ void ContinuousBatchingPipeline::ContinuousBatchingImpl::step() { _pull_awaiting_requests(); m_pipeline_metrics.requests = m_requests.size(); - Scheduler::Output scheduler_output; { static ManualTimer timer("scheduling"); diff --git a/src/cpp/src/device_config.hpp b/src/cpp/src/device_config.hpp index 2af4559ef1..371142701c 100644 --- a/src/cpp/src/device_config.hpp +++ b/src/cpp/src/device_config.hpp @@ -12,7 +12,7 @@ namespace ov::genai { class DeviceConfig { ov::element::Type m_kv_cache_type; - ov::Shape m_key_cache_shape, m_value_cache_shape; + ov::PartialShape m_key_cache_shape, m_value_cache_shape; ov::Shape::value_type m_num_kv_heads, m_head_size, m_num_decoder_layers; size_t m_num_kv_blocks = 0; size_t m_block_size = 0; @@ -80,11 +80,10 @@ class DeviceConfig { OPENVINO_THROW(m_device, " is not supported by OpenVINO Continuous Batching"); } - OPENVINO_ASSERT(scheduling_config.num_kv_blocks > 0 || scheduling_config.cache_size > 0, "num_kv_blocks or cache_size should be more than zero."); if (scheduling_config.num_kv_blocks > 0) { m_num_kv_blocks = scheduling_config.num_kv_blocks; } - else { + else if (scheduling_config.cache_size > 0) { m_cache_size = scheduling_config.cache_size; } } @@ -104,23 +103,22 @@ class DeviceConfig { m_head_size += 8; } - if (m_num_kv_blocks == 0) { - OPENVINO_ASSERT(m_cache_size > 0, "num_kv_blocks or cache_size should be more than zero."); + if (m_num_kv_blocks == 0 && m_cache_size > 0) { size_t size_in_bytes = m_cache_size * 1024 * 1024 * 1024; m_num_kv_blocks = size_in_bytes / (m_num_decoder_layers * 2 * m_num_kv_heads * m_block_size * m_head_size * m_kv_cache_type.size()); } - m_key_cache_shape = m_value_cache_shape = ov::Shape{m_num_kv_blocks, - m_num_kv_heads, - m_block_size, - m_head_size}; + m_key_cache_shape = m_value_cache_shape = ov::PartialShape{ov::Dimension::dynamic(), + ov::Dimension(m_num_kv_heads), + ov::Dimension(m_block_size), + ov::Dimension(m_head_size)}; if (m_device.find("GPU") != std::string::npos) { // Update key shape, as the key's shape is different from the value's shape - m_key_cache_shape = ov::Shape{m_num_kv_blocks, - m_num_kv_heads, - 
m_head_size, - m_block_size}; + m_key_cache_shape = ov::PartialShape{ov::Dimension::dynamic(), + ov::Dimension(m_num_kv_heads), + ov::Dimension(m_head_size), + ov::Dimension(m_block_size)}; } } @@ -136,13 +134,13 @@ class DeviceConfig { return m_num_decoder_layers; } - ov::Shape get_key_cache_shape() const { - OPENVINO_ASSERT(!m_key_cache_shape.empty()); + ov::PartialShape get_key_cache_shape() const { + OPENVINO_ASSERT(m_key_cache_shape.size()); return m_key_cache_shape; } - ov::Shape get_value_cache_shape() const { - OPENVINO_ASSERT(!m_value_cache_shape.empty()); + ov::PartialShape get_value_cache_shape() const { + OPENVINO_ASSERT(m_value_cache_shape.size()); return m_value_cache_shape; } @@ -153,5 +151,9 @@ class DeviceConfig { size_t get_block_size() const { return m_block_size; } + + size_t get_block_size_in_bytes() const { + return m_num_decoder_layers * 2 * m_num_kv_heads * m_block_size * m_head_size * get_cache_precision().size(); + } }; } diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp index 33180a9199..be5ecf17fa 100644 --- a/src/cpp/src/llm_pipeline.cpp +++ b/src/cpp/src/llm_pipeline.cpp @@ -718,7 +718,9 @@ ov::genai::LLMPipeline::LLMPipeline( const ov::AnyMap& properties ){ auto start_time = std::chrono::steady_clock::now(); - if (properties.find(ov::genai::scheduler_config.name()) != properties.end()) { + if (properties.find(ov::genai::scheduler_config.name()) != properties.end() || + properties.find(utils::DRAFT_MODEL_ARG_NAME) != properties.end() || + properties.find(ov::genai::prompt_lookup.name()) != properties.end()) { auto [plugin_config, scheduler_config] = utils::split_scheduler_config(properties); m_pimpl = std::make_unique(models_path, tokenizer, scheduler_config, device, plugin_config); } else if (device == "NPU") { @@ -737,7 +739,9 @@ ov::genai::LLMPipeline::LLMPipeline( ){ auto start_time = std::chrono::steady_clock::now(); - if (config.find(ov::genai::scheduler_config.name()) != config.end()) { + if (config.find(ov::genai::scheduler_config.name()) != config.end() || + config.find(utils::DRAFT_MODEL_ARG_NAME) != config.end() || + config.find(ov::genai::prompt_lookup.name()) != config.end()) { auto [plugin_config, scheduler_config] = utils::split_scheduler_config(config); m_pimpl = std::make_unique(models_path, scheduler_config, device, plugin_config); } else if (device == "NPU") { @@ -760,7 +764,10 @@ ov::genai::LLMPipeline::LLMPipeline( auto [core_properties, plugin_config] = ov::genai::utils::split_core_compile_config(config); auto start_time = std::chrono::steady_clock::now(); - if (plugin_config.find(ov::genai::scheduler_config.name()) != plugin_config.end()) { + if (plugin_config.find(ov::genai::scheduler_config.name()) != plugin_config.end() || + plugin_config.find(utils::DRAFT_MODEL_ARG_NAME) != plugin_config.end() || + plugin_config.find(ov::genai::prompt_lookup.name()) != plugin_config.end()){ + auto [plugin_config_, scheduler_config] = utils::split_scheduler_config(plugin_config); m_pimpl = std::make_unique(model_str, weights_tensor, tokenizer, scheduler_config, device, plugin_config_, generation_config); diff --git a/src/cpp/src/scheduler.hpp b/src/cpp/src/scheduler.hpp index 6de4adaa47..da65c68bec 100644 --- a/src/cpp/src/scheduler.hpp +++ b/src/cpp/src/scheduler.hpp @@ -7,10 +7,12 @@ #include #include +#include "openvino/runtime/intel_gpu/properties.hpp" #include "openvino/genai/scheduler_config.hpp" #include "device_config.hpp" #include "block_manager.hpp" #include "sequence_group.hpp" +#include "cache_manager.hpp" namespace 
ov::genai { class Scheduler { @@ -20,6 +22,13 @@ class Scheduler { BlockManager m_block_manager; friend class CacheStateDumper; + bool m_dynamic_memory_allocation = false; + + // Dynamic KV-cache allocation params + size_t m_kv_blocks_initial_multiplier = 2; + const float m_cache_growth_factor = 2; // commmon values 1.5 or 2 + + std::shared_ptr m_cache_manager; public: struct Output { // IDs of scheduled groups @@ -36,15 +45,20 @@ class Scheduler { float m_cache_usage = 0.0; }; - explicit Scheduler(size_t block_size, const SchedulerConfig & config = {}, size_t num_layers = 1, bool can_use_partial_preemption = true) : + explicit Scheduler(size_t block_size, std::shared_ptr cache_manager, const SchedulerConfig & config = {}, size_t num_layers = 1, bool can_use_partial_preemption = true) : + m_cache_manager(cache_manager), m_can_use_partial_preemption(can_use_partial_preemption), m_config(config), m_block_manager(m_config.num_kv_blocks, m_config.enable_prefix_caching, block_size, num_layers) { + OPENVINO_ASSERT(num_layers != 0, "num_layers must be non-zero"); } Output schedule(std::vector& sequence_groups) { Output scheduler_output; + if (m_block_manager.get_total_number_of_kv_blocks() == 0) { + _initialize_cache(sequence_groups); + } if (m_config.dynamic_split_fuse) { // deepspeed-mii case @@ -64,9 +78,9 @@ class Scheduler { } } + m_cache_manager->allocate_cache_if_needed(m_block_manager.get_total_number_of_kv_blocks()); _clear_waiting_sequences(sequence_groups); scheduler_output.m_cache_usage = m_block_manager.get_used_percentage(); - return scheduler_output; } @@ -236,8 +250,13 @@ class Scheduler { OPENVINO_ASSERT(currently_allocated_token_slots >= occupied_token_slots, "internal error"); size_t available_slots = currently_allocated_token_slots - occupied_token_slots, required_slots = num_scheduled_tokens > available_slots ? 
num_scheduled_tokens - available_slots : 0; - size_t num_required_blocks = (required_slots + block_size - 1) / block_size, num_free_blocks = m_block_manager.num_free_blocks(); - size_t num_scheduled_blocks = std::min(num_required_blocks, num_free_blocks); + size_t num_required_blocks = (required_slots + block_size - 1) / block_size; + while (num_required_blocks > m_block_manager.num_free_blocks()) { + if (!_try_increase_cache()) { + break; + } + } + size_t num_scheduled_blocks = std::min(num_required_blocks, m_block_manager.num_free_blocks()); // some scheduled blocks can be no fully occupied, so we need to take min between num_scheduled_blocks // and total "scheduled capacity" num_scheduled_tokens = std::min(num_scheduled_tokens, available_slots + num_scheduled_blocks * block_size); @@ -289,10 +308,16 @@ class Scheduler { size_t num_scheduled_tokens_per_seq = std::min(available_tokens_per_seq_in_megabatch, num_available_tokens_per_seq); sequence_group->schedule_tokens(num_scheduled_tokens_per_seq); + while (!m_block_manager.can_append_slots(sequence_group)){ + if (!_try_increase_cache()) { + break; + } + } + _apply_preemption(sequence_group_id, sequence_groups); // if we can't preemt any more sequences, clear scheduled tokens and move to next sequence - if (!m_block_manager.can_append_slots(sequence_group)){ + if (!m_block_manager.can_append_slots(sequence_group)) { sequence_group->clear_scheduled_tokens(); continue; } @@ -370,6 +395,11 @@ class Scheduler { // apply KV cache limitations size_t block_size = get_block_size(); const size_t num_required_blocks = (sequence_len + block_size - 1) / block_size; + while (!m_block_manager.can_allocate_blocks(num_required_blocks)){ + if (!_try_increase_cache()) { + break; + } + } if (!m_block_manager.can_allocate_blocks(num_required_blocks)) break; @@ -405,6 +435,86 @@ class Scheduler { sequence_groups[sequence_group_id]->clear_waiting_sequences(); } } + + size_t _get_available_gpu_memory() { + auto device_config = m_cache_manager->get_device_config(); + auto core = m_cache_manager->get_core(); + auto device = device_config->get_device(); + OPENVINO_ASSERT(device.find("GPU") != std::string::npos, "_get_available_gpu_memory() is applicable for GPU only."); + auto memory_statistics = core->get_property(device, ov::intel_gpu::memory_statistics); + auto device_type = core->get_property(device, ov::device::type); + + // sum up all used device memory + std::vector device_memory_types = {"cl_mem", "usm_device"}; + size_t used_device_mem = 0; + for (auto mem_type: device_memory_types) { + used_device_mem += memory_statistics[mem_type]; + } + + if (device_type == ov::device::Type::INTEGRATED) { + used_device_mem += memory_statistics["usm_host"]; + } + + // there could be unaccounted extra memory reserved by kernels, kept + // in memory pools, etc + // therefore, add a threshold to account for this + float used_memory_threshold = 1.1; + used_device_mem *= used_memory_threshold; + + // total device memory in bytes + auto total_device_memory = core->get_property(device, ov::intel_gpu::device_total_mem_size); + + return total_device_memory - used_device_mem; + } + + void _initialize_cache(const std::vector& sequence_groups) { + size_t blocks_sum = 0; + for (auto idx = 0; idx < sequence_groups.size(); idx++) { + auto seq_length = sequence_groups[idx]->get_prompt_len() * m_kv_blocks_initial_multiplier; + auto gen_config = sequence_groups[idx]->get_sampling_parameters(); + seq_length = std::min(seq_length, sequence_groups[idx]->get_prompt_len() + 
gen_config.get_max_new_tokens(sequence_groups[idx]->get_prompt_len())); + size_t blocks_num = std::ceil((float)seq_length / m_block_manager.get_block_size()); + if (gen_config.is_beam_search()) { + blocks_num *= gen_config.num_beams; + } else if (gen_config.is_multinomial()) { + blocks_num *= gen_config.num_return_sequences; + } + blocks_sum += blocks_num; + } + m_block_manager.increase_kv_blocks_number(blocks_sum); + m_dynamic_memory_allocation = true; + } + + bool _try_increase_cache() { + if (!m_dynamic_memory_allocation) { + return false; + } + auto device_config = m_cache_manager->get_device_config(); + auto device = device_config->get_device(); + size_t current_num_of_kv_blocks = m_block_manager.get_total_number_of_kv_blocks(); + size_t new_blocks_num = current_num_of_kv_blocks * m_cache_growth_factor; + + if (device.find("GPU") == std::string::npos) { + m_block_manager.increase_kv_blocks_number(new_blocks_num); + } + else { + size_t available_gpu_memory = _get_available_gpu_memory(); + size_t required_memory = (new_blocks_num - current_num_of_kv_blocks) * device_config->get_block_size_in_bytes(); + if (required_memory <= available_gpu_memory) { + m_block_manager.increase_kv_blocks_number(new_blocks_num); + } else { + size_t possible_blocks_to_add = available_gpu_memory / device_config->get_block_size_in_bytes(); + if (possible_blocks_to_add > 0) { + m_block_manager.increase_kv_blocks_number(current_num_of_kv_blocks + possible_blocks_to_add); + } + else { + return false; + } + } + } + return true; + } + }; } diff --git a/src/cpp/src/speculative_decoding/speculative_decoding_impl.cpp b/src/cpp/src/speculative_decoding/speculative_decoding_impl.cpp index 46b7b106a6..257c20bf01 100644 --- a/src/cpp/src/speculative_decoding/speculative_decoding_impl.cpp +++ b/src/cpp/src/speculative_decoding/speculative_decoding_impl.cpp @@ -52,8 +52,7 @@ ContinuousBatchingPipeline::SpeculativeDecodingImpl::SpeculativeDecodingImpl(con size_t main_cache_size = std::ceil(main_scheduler_config.cache_size * (1.f - k)), draft_cache_size = main_scheduler_config.cache_size - main_cache_size; - OPENVINO_ASSERT(main_cache_size > 0, "KV cache model cache size should be > 0"); - if (draft_cache_size == 0) { + if (draft_cache_size == 0 && main_cache_size > 0) { main_cache_size -= (main_cache_size > 1 ? 
1 : 0); draft_cache_size = 1; } diff --git a/src/cpp/src/utils/paged_attention_transformations.cpp b/src/cpp/src/utils/paged_attention_transformations.cpp index 16c9556151..4dedcf989a 100644 --- a/src/cpp/src/utils/paged_attention_transformations.cpp +++ b/src/cpp/src/utils/paged_attention_transformations.cpp @@ -10,11 +10,6 @@ namespace ov { namespace genai { namespace utils { -inline ov::PartialShape to_partial_with_dyn_0_dim(const ov::Shape& static_shape) { - ov::PartialShape partial_shape = static_shape; - partial_shape[0] = ov::Dimension::dynamic(); - return partial_shape; -} size_t get_hidden_size(const std::shared_ptr model) { const auto& parameters = model->get_parameters(); @@ -65,9 +60,8 @@ void set_kv_cache_type_and_shape(std::shared_ptr model, DeviceConfig& for (auto it_k = key_cache_params.begin(), it_v = value_cache_params.begin(); it_k != key_cache_params.end();++it_k, ++it_v) { it_k->second->set_element_type(device_config.get_cache_precision()); it_v->second->set_element_type(device_config.get_cache_precision()); - // TODO: CVS-145270 - it_k->second->set_partial_shape(to_partial_with_dyn_0_dim(device_config.get_key_cache_shape())); - it_v->second->set_partial_shape(to_partial_with_dyn_0_dim(device_config.get_value_cache_shape())); + it_k->second->set_partial_shape(device_config.get_key_cache_shape()); + it_v->second->set_partial_shape(device_config.get_value_cache_shape()); } model->validate_nodes_and_infer_types(); diff --git a/tests/cpp/cache_manager.cpp b/tests/cpp/cache_manager.cpp index b2a5396d5f..7f07980389 100644 --- a/tests/cpp/cache_manager.cpp +++ b/tests/cpp/cache_manager.cpp @@ -7,8 +7,43 @@ #include "scheduler.hpp" #include "device_config.hpp" #include "cache_manager.hpp" +#include "openvino/op/concat.hpp" -TEST(TestCacheManager, general_test) { +using namespace ov::genai; + +std::shared_ptr get_dummy_model(size_t num_layers) { + ov::NodeVector keys; + ov::NodeVector values; + ov::ParameterVector params; + auto shape = ov::PartialShape({ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}); + for (size_t i = 0; i < num_layers; i++) { + auto key = std::make_shared(ov::element::f16, shape); + auto value = std::make_shared(ov::element::f16, shape); + key->get_output_tensor(0).set_names({"key_cache." + std::to_string(i)}); + value->get_output_tensor(0).set_names({"value_cache." 
+ std::to_string(i)}); + keys.push_back(key); + values.push_back(value); + params.push_back(key); + params.push_back(value); + } + const auto& concat1 = std::make_shared(keys, 1); + const auto& concat2 = std::make_shared(values, 1); + auto model = std::make_shared(ov::NodeVector{concat1, concat2}, params); + return std::make_shared(ov::NodeVector{concat1, concat2}, params); +} + +size_t get_total_allocated_bytes(std::shared_ptr cache_manager, size_t num_decoder_layers) { + size_t allocated_bytes = 0; + for (size_t i = 0; i < num_decoder_layers; i++) { + auto key_cache = cache_manager->get_key_cache(i); + auto value_cache = cache_manager->get_value_cache(i); + allocated_bytes += key_cache.get_byte_size() + value_cache.get_byte_size(); + } + return allocated_bytes; +} + + +TEST(TestCacheManager, test_cache_size_param) { ov::Core core; ov::genai::SchedulerConfig scheduler_config; scheduler_config.max_num_batched_tokens = 32; @@ -21,14 +56,73 @@ TEST(TestCacheManager, general_test) { size_t num_decoder_layers = 12; device_config.set_model_params(12, 64, num_decoder_layers); - auto cache_manager = std::make_shared(device_config, core); - - size_t allocated_bytes = 0; - for (size_t i = 0; i < num_decoder_layers; i++) { - auto key_cache = cache_manager->get_key_cache(i); - auto value_cache = cache_manager->get_value_cache(i); - allocated_bytes += key_cache.get_byte_size() + value_cache.get_byte_size(); - } + ov::InferRequest request = core.compile_model(get_dummy_model(num_decoder_layers)).create_infer_request(); + auto cache_manager = std::make_shared(device_config, request, core); + auto block_manager = BlockManager(device_config.get_num_kv_blocks(), false, device_config.get_block_size(), device_config.get_num_layers()); + cache_manager->allocate_cache_if_needed(block_manager.get_total_number_of_kv_blocks()); - ASSERT_EQ(allocated_bytes, 2146959360); + ASSERT_EQ(get_total_allocated_bytes(cache_manager, num_decoder_layers), 2146959360); } + + +TEST(TestCacheManager, test_kv_blocks_param) { + ov::Core core; + ov::genai::SchedulerConfig scheduler_config; + scheduler_config.max_num_batched_tokens = 32; + scheduler_config.num_kv_blocks = 150; + scheduler_config.cache_size = 0; + scheduler_config.max_num_seqs = 2; + + const std::string device = "CPU"; + ov::genai::DeviceConfig device_config(core, scheduler_config, "CPU"); + size_t num_decoder_layers = 12; + device_config.set_model_params(12, 64, num_decoder_layers); + + ov::InferRequest request = core.compile_model(get_dummy_model(num_decoder_layers)).create_infer_request(); + auto cache_manager = std::make_shared(device_config, request, core); + auto block_manager = BlockManager(device_config.get_num_kv_blocks(), false, device_config.get_block_size(), device_config.get_num_layers()); + OPENVINO_ASSERT(block_manager.get_total_number_of_kv_blocks(), scheduler_config.num_kv_blocks); +} + + +TEST(TestCacheManager, test_dynamic_cache_increase) { + ov::Core core; + ov::genai::SchedulerConfig scheduler_config; + scheduler_config.max_num_batched_tokens = 32; + scheduler_config.num_kv_blocks = 0; + scheduler_config.cache_size = 0; + scheduler_config.max_num_seqs = 2; + + const std::string device = "CPU"; + ov::genai::DeviceConfig device_config(core, scheduler_config, "CPU"); + size_t num_decoder_layers = 12; + size_t head_size = 64; + size_t num_kv_heads = 12; + device_config.set_model_params(num_kv_heads, head_size, num_decoder_layers); + size_t block_size_in_bytes = num_decoder_layers * 2 * num_kv_heads * device_config.get_block_size() * head_size * 
device_config.get_cache_precision().size(); + + + ov::InferRequest request = core.compile_model(get_dummy_model(num_decoder_layers)).create_infer_request(); + auto cache_manager = std::make_shared(device_config, request, core); + auto block_manager = BlockManager(device_config.get_num_kv_blocks(), false, device_config.get_block_size(), device_config.get_num_layers()); + + // check initial cache allocation + block_manager.increase_kv_blocks_number(100); + OPENVINO_ASSERT(block_manager.get_total_number_of_kv_blocks(), 100); + + cache_manager->allocate_cache_if_needed(block_manager.get_total_number_of_kv_blocks()); + OPENVINO_ASSERT(get_total_allocated_bytes(cache_manager, num_decoder_layers), 100 * block_size_in_bytes); + + + // check cache increase + block_manager.increase_kv_blocks_number(200); + OPENVINO_ASSERT(block_manager.get_total_number_of_kv_blocks(), 200); + + cache_manager->allocate_cache_if_needed(block_manager.get_total_number_of_kv_blocks()); + OPENVINO_ASSERT(get_total_allocated_bytes(cache_manager, num_decoder_layers), 200 * block_size_in_bytes); + + + // check that cache does not increase if new blocks were not allocated + cache_manager->allocate_cache_if_needed(block_manager.get_total_number_of_kv_blocks()); + OPENVINO_ASSERT(get_total_allocated_bytes(cache_manager, num_decoder_layers), 200 * block_size_in_bytes); +} \ No newline at end of file diff --git a/tests/cpp/scheduler.cpp b/tests/cpp/scheduler.cpp index 40c3e73747..ea1720faa2 100644 --- a/tests/cpp/scheduler.cpp +++ b/tests/cpp/scheduler.cpp @@ -4,6 +4,7 @@ #include #include "openvino/runtime/core.hpp" +#include "openvino/op/concat.hpp" #include "openvino/genai/continuous_batching_pipeline.hpp" #include "openvino/genai/generation_config.hpp" #include "sequence_group.hpp" @@ -17,6 +18,37 @@ void clear_finished_sequences(std::vector& requests) { }); requests.erase(new_end, requests.end()); } +std::shared_ptr get_model(size_t num_layers) { + ov::NodeVector keys; + ov::NodeVector values; + ov::ParameterVector params; + auto shape = ov::PartialShape({ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}); + for (size_t i = 0; i < num_layers; i++) { + auto key = std::make_shared(ov::element::f16, shape); + auto value = std::make_shared(ov::element::f16, shape); + key->get_output_tensor(0).set_names({"key_cache." + std::to_string(i)}); + value->get_output_tensor(0).set_names({"value_cache." 
+ std::to_string(i)}); + keys.push_back(key); + values.push_back(value); + params.push_back(key); + params.push_back(value); + } + const auto& concat1 = std::make_shared(keys, 1); + const auto& concat2 = std::make_shared(values, 1); + auto model = std::make_shared(ov::NodeVector{concat1, concat2}, params); + return std::make_shared(ov::NodeVector{concat1, concat2}, params); +} + +std::shared_ptr init_cache_manager(SchedulerConfig scheduler_config) { + ov::Core core = ov::Core(); + size_t num_decoder_layers = 12; + ov::InferRequest request = core.compile_model(get_model(num_decoder_layers)).create_infer_request(); + size_t head_size = 64, head_size_u8 = head_size + 8; + size_t num_kv_heads = 12; + ov::genai::DeviceConfig device_config(core, scheduler_config, "CPU"); + device_config.set_model_params(num_kv_heads, head_size_u8, num_decoder_layers); + return std::make_shared(device_config, request, core); +} TEST(TestScheduler, general_test) { std::array configs = {SchedulerConfig(), SchedulerConfig()}; @@ -40,10 +72,9 @@ TEST(TestScheduler, general_test) { ov::genai::greedy(), 4, scheduler_config.enable_prefix_caching); auto idx2 = (*sequence_group3)[0]->get_id(); std::vector requests = {sequence_group1, sequence_group2, sequence_group3}; - // schedule 3 sequence groups that use 6 kv blocks - Scheduler scheduler = Scheduler(4, scheduler_config); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config); auto out1 = scheduler.schedule(requests); std::vector ref_ids = {0, 1, 2}; @@ -144,7 +175,7 @@ TEST_P(AppendSlotsSchedulerTest, test_append_slots_considers_all_sequences) { auto idx1 = (*sequence_group2)[0]->get_id(); std::vector requests = {sequence_group1, sequence_group2}; - Scheduler scheduler = Scheduler(4, scheduler_config); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config); auto out1 = scheduler.schedule(requests); std::vector ref_ids = {0, 1}; @@ -212,7 +243,7 @@ TEST_P(PartialPreemptionSchedulerTest, test_partial_preemption) { // schedule 2 sequence groups that use 5 kv blocks - Scheduler scheduler = Scheduler(4, scheduler_config); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config); auto out0 = scheduler.schedule(requests); for (auto seq: requests) { @@ -297,7 +328,7 @@ TEST(TestScheduler, test_partial_preemption_beam_search) { sequence_group->set_sequence_group_ptr(sequence_group); std::vector requests = {sequence_group}; - Scheduler scheduler = Scheduler(4, scheduler_config); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config); auto out = scheduler.schedule(requests); for (auto sequence: sequence_group->get_not_finished_sequences()) { sequence->append_token(token, 0.7); @@ -405,11 +436,10 @@ TEST(TestScheduler, test_partially_preempted_prompt) { SequenceGroup::Ptr sequence_group2 = std::make_shared(1, ov::Tensor(ov::element::i64, {tokens.size()}, tokens.data()), ov::genai::greedy(), 4, scheduler_config.enable_prefix_caching); auto idx1 = (*sequence_group2)[0]->get_id(); - std::vector requests = {sequence_group1, sequence_group2}; - + std::vector requests = {sequence_group1, sequence_group2}; // schedule 2 sequence groups that use all available 2*3 kv blocks, we used all available kv-blocks. 
- Scheduler scheduler = Scheduler(4, scheduler_config); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config); auto out1 = scheduler.schedule(requests); for (auto seq: requests) { @@ -503,7 +533,7 @@ TEST(TestScheduler, prefix_caching_test) { std::vector prompt_tokens = {0,1,2,3,4,5,6,7}; std::vector histrory_tokens = {}; // schedule prompt - Scheduler scheduler = Scheduler(4, scheduler_config); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config); size_t chat_iterations = 10; @@ -566,7 +596,7 @@ TEST(TestScheduler, prefix_caching_test_two_identical_sequences) { std::vector prompt_tokens = {0,1,2,3,4,5,6,7}; std::vector histrory_tokens = {}; // schedule prompt - Scheduler scheduler = Scheduler(4, scheduler_config); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config); size_t chat_iterations = 10; @@ -640,7 +670,7 @@ TEST(TestScheduler, prefix_caching_with_max_new_tokens_equal_1) { for (auto scheduler_config: configs) { std::vector prompt_tokens = {0,1,2,3,4,5,6,7}; // schedule prompt - Scheduler scheduler = Scheduler(32, scheduler_config); + Scheduler scheduler = Scheduler(32, init_cache_manager(scheduler_config), scheduler_config); size_t chat_iterations = 2; @@ -701,7 +731,7 @@ TEST(TestScheduler, test_partially_preempted_prompt_not_allowed) { // schedule 2 sequence groups that use all available 2*3 kv blocks, we used all available kv-blocks. const bool can_use_partial_preemption = false; - Scheduler scheduler = Scheduler(4, scheduler_config, 1, can_use_partial_preemption); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config, 1, can_use_partial_preemption); auto out1 = scheduler.schedule(requests); for (auto req : requests) @@ -775,7 +805,7 @@ TEST(TestScheduler, test_partially_preempted_prompt_not_allowed2) { // schedule 2 sequence groups that use all available 2*3 kv blocks, we used all available kv-blocks. 
const bool can_use_partial_preemption = false; - Scheduler scheduler = Scheduler(4, scheduler_config, 1, can_use_partial_preemption); + Scheduler scheduler = Scheduler(4, init_cache_manager(scheduler_config), scheduler_config, 1, can_use_partial_preemption); scheduler.schedule(requests); for (auto req: requests) req->finish_iteration(); @@ -874,7 +904,6 @@ TEST(TestScheduler, FullyPreemptsCacheEvictedSequences) { scheduler_config.use_cache_eviction = true; scheduler_config.cache_eviction_config = ov::genai::CacheEvictionConfig(2, 2, 6, ov::genai::AggregationMode::NORM_SUM); - std::vector tokens1 = {0, 1}; // 1 full block SequenceGroup::Ptr sequence_group1 = std::make_shared(0, ov::Tensor(ov::element::i64, {tokens1.size()}, @@ -890,7 +919,7 @@ TEST(TestScheduler, FullyPreemptsCacheEvictedSequences) { std::vector requests = {sequence_group1, sequence_group2}; - Scheduler scheduler = Scheduler(2, scheduler_config); + Scheduler scheduler = Scheduler(2, init_cache_manager(scheduler_config), scheduler_config); // prompt phase - schedules 1 block for seq 1, 5 blocks for seq 2 auto out = scheduler.schedule(requests); diff --git a/tests/python_tests/common.py b/tests/python_tests/common.py index 163a00192e..cf5fbb3403 100644 --- a/tests/python_tests/common.py +++ b/tests/python_tests/common.py @@ -266,7 +266,6 @@ def get_test_dataset() -> Tuple[List[str], List[GenerationConfig]]: def get_scheduler_config(scheduler_params: dict = None) -> SchedulerConfig: scheduler_config = SchedulerConfig() - scheduler_config.cache_size = 1 if scheduler_params is None: scheduler_config.dynamic_split_fuse = True # vLLM specific diff --git a/tests/python_tests/ov_genai_test_utils.py b/tests/python_tests/ov_genai_test_utils.py index b633497d32..5f2702a774 100644 --- a/tests/python_tests/ov_genai_test_utils.py +++ b/tests/python_tests/ov_genai_test_utils.py @@ -283,5 +283,4 @@ def load_pipe(configs: List[Tuple], temp_path): @functools.lru_cache(1) def get_continuous_batching(path): scheduler_config = ov_genai.SchedulerConfig() - scheduler_config.cache_size = 1 return ov_genai.LLMPipeline(path, ov_genai.Tokenizer(path), 'CPU', **{"scheduler_config": scheduler_config}) diff --git a/tests/python_tests/test_cache_optimizations.py b/tests/python_tests/test_cache_optimizations.py index 45704f9dc6..3c09d34756 100644 --- a/tests/python_tests/test_cache_optimizations.py +++ b/tests/python_tests/test_cache_optimizations.py @@ -15,7 +15,7 @@ from openvino import serialize from transformers import AutoTokenizer -from common import TESTS_ROOT +from common import TESTS_ROOT, run_test_pipeline def load_prompts_dataset(file_name : str) -> Dict[str, List[str]]: @@ -145,3 +145,28 @@ def test_cache_optimized_generation_is_similar_to_unoptimized(converted_model, t del model_cb_noopt +def get_greedy_seq_len_300() -> GenerationConfig: + generation_config = GenerationConfig() + generation_config.num_return_sequences = 3 + generation_config.max_new_tokens = 300 + return generation_config + +def get_beam_search_seq_len_300() -> GenerationConfig: + generation_config = GenerationConfig() + generation_config.num_beam_groups = 3 + generation_config.num_beams = 6 + generation_config.max_new_tokens = 300 + generation_config.num_return_sequences = generation_config.num_beams + return generation_config + +scheduler_params_list = [ + ({"num_kv_blocks": 0, "cache_size": 0, "dynamic_split_fuse": True, "enable_prefix_caching": True}, get_greedy_seq_len_300()), + ({"num_kv_blocks": 0, "cache_size": 0, "dynamic_split_fuse": False, "max_num_batched_tokens": 
600, "enable_prefix_caching": True}, get_beam_search_seq_len_300()), + ({"num_kv_blocks": 0, "cache_size": 0, "dynamic_split_fuse": True, "enable_prefix_caching": False}, get_greedy_seq_len_300()), + ({"num_kv_blocks": 0, "cache_size": 0, "dynamic_split_fuse": False, "max_num_batched_tokens": 600, "enable_prefix_caching": False}, get_beam_search_seq_len_300()), + ({"num_kv_blocks": 0, "cache_size": 0, "dynamic_split_fuse": False, "max_num_batched_tokens": 600, "use_cache_eviction": True, "cache_eviction_config": SHORT_CACHE_EVICTION_CONFIG}, get_greedy_seq_len_300())] +@pytest.mark.parametrize("params", scheduler_params_list) +@pytest.mark.precommit +def test_dynamic_memory_allocation(tmp_path, params): + run_test_pipeline(tmp_path, "facebook/opt-125m", params[0], params[1]) +