-
Notifications
You must be signed in to change notification settings - Fork 200
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[LLM] [NPU] StaticLLMPipeline: Compiler DQ update #1515
Merged
smirnov-alexey
merged 8 commits into
openvinotoolkit:master
from
smirnov-alexey:as/npuw_dq
Jan 14, 2025
Merged
Changes from all commits
Commits
Show all changes
8 commits
Select commit
Hold shift + click to select a range
ba70ef1
Update DQ query
smirnov-alexey f5dd5b1
Unconditionally utilize compiler DQ
smirnov-alexey cc44a0d
DQ only when in supported props
smirnov-alexey fa76cf7
Add prefix
smirnov-alexey 868a7ac
Align DQ behaviour
smirnov-alexey 3c72f4d
Address review comments
smirnov-alexey 24b1a63
Merge branch 'master' into as/npuw_dq
smirnov-alexey 102a1d9
Merge branch 'master' into as/npuw_dq
smirnov-alexey File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -475,17 +475,16 @@ std::optional<NPUDesc> extract_npu_descriptor(ov::Core& core) { | |
} | ||
const auto arch = core.get_property("NPU", ov::device::architecture); | ||
const auto max_tiles = core.get_property("NPU", ov::intel_npu::max_tiles); | ||
|
||
bool compiler_dq = false; | ||
const auto device_caps = core.get_property("NPU", ov::device::capabilities); | ||
if (std::find(device_caps.begin(), device_caps.end(), | ||
"COMPILER_DYNAMIC_QUANTIZATION") != device_caps.end()) { | ||
const auto supported_properties = core.get_property("NPU", ov::supported_properties); | ||
if (std::find(supported_properties.begin(), supported_properties.end(), | ||
"NPU_COMPILER_DYNAMIC_QUANTIZATION") != supported_properties.end()) { | ||
compiler_dq = true; | ||
} | ||
return std::make_optional(NPUDesc{arch, max_tiles, compiler_dq}); | ||
} | ||
|
||
ov::AnyMap get_baseline_common_config() { | ||
ov::AnyMap get_baseline_common_config(const std::optional<NPUDesc>& npudesc) { | ||
ov::AnyMap config = { | ||
{ "NPU_COMPILATION_MODE_PARAMS", "compute-layers-with-higher-precision=Sqrt,Power,ReduceMean,Add_RMSNorm" }, | ||
{ "NPUW_DEVICES", "NPU" }, | ||
|
@@ -497,11 +496,20 @@ ov::AnyMap get_baseline_common_config() { | |
{ "NPUW_SLICE_OUT", "YES" }, | ||
{ "NPUW_FUNCALL_ASYNC", "YES" } | ||
}; | ||
// FIXME: this config logic is getting more and more complex | ||
if (npudesc.has_value() && npudesc->compiler_dq) { | ||
config.emplace("NPUW_DQ", "YES"); | ||
config.emplace("NPUW_DQ_FULL", "NO"); | ||
config.emplace("NPU_COMPILER_DYNAMIC_QUANTIZATION", "YES"); | ||
config.erase("NPUW_DCOFF_TYPE"); | ||
config.erase("NPUW_DCOFF_SCALE"); | ||
} | ||
return config; | ||
} | ||
|
||
ov::AnyMap get_default_common_config(const std::shared_ptr<ov::Model>& model) { | ||
auto config = get_baseline_common_config(); | ||
ov::AnyMap get_default_common_config(const std::shared_ptr<ov::Model>& model, | ||
const std::optional<NPUDesc>& npudesc) { | ||
auto config = get_baseline_common_config(npudesc); | ||
const char* npu_l0 = std::getenv("DISABLE_OPENVINO_GENAI_NPU_L0"); | ||
if (npu_l0 && std::atoi(npu_l0) == 1) { | ||
config.emplace("NPUW_WEIGHTS_BANK_ALLOC", "CPU"); | ||
|
@@ -513,40 +521,39 @@ ov::AnyMap get_default_common_config(const std::shared_ptr<ov::Model>& model) { | |
|
||
ov::AnyMap get_default_prefill_config(const std::shared_ptr<ov::Model>& model, | ||
const std::optional<NPUDesc>& npudesc) { | ||
auto config = get_default_common_config(model); | ||
if (is_cw_compressed(model)) { | ||
config.emplace("NPUW_DQ", "YES"); | ||
} else { | ||
config.emplace("NPUW_PMM", "NO"); | ||
} | ||
auto config = get_default_common_config(model, npudesc); | ||
if (npudesc.has_value() && | ||
npudesc->arch == "4000" && | ||
npudesc->max_tiles != -1) { | ||
config.emplace("NPU_DPU_GROUPS", npudesc->max_tiles); | ||
} | ||
if (npudesc.has_value() && npudesc->compiler_dq) { | ||
config.emplace("NPUW_DQ_FULL", "NO"); | ||
// Specify NPUW DQ if Compiler DQ is not enabled | ||
if (!npudesc.has_value() || !npudesc->compiler_dq) { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Not gonna lie @TolyaTalamanov, it was much better WITH that tiny one-liner than without that. |
||
if (is_cw_compressed(model)) { | ||
config.emplace("NPUW_DQ", "YES"); | ||
} else { | ||
config.emplace("NPUW_PMM", "NO"); | ||
} | ||
} | ||
return config; | ||
} | ||
|
||
ov::AnyMap get_default_generate_config(const std::shared_ptr<ov::Model>& model, | ||
const std::optional<NPUDesc>& npudesc, | ||
const GenerateHint hint) { | ||
auto config = get_default_common_config(model); | ||
auto config = get_default_common_config(model, npudesc); | ||
if (hint == GenerateHint::BEST_PERF) { | ||
config.emplace("NPUW_ONLINE_PIPELINE", "NONE"); | ||
} | ||
// NB: Unconditionally set for generation model | ||
config.emplace("NPUW_DQ", "YES"); | ||
if (npudesc.has_value() && npudesc->arch == "4000") { | ||
config.emplace("NPU_DPU_GROUPS", 4); | ||
} | ||
if (hint == GenerateHint::FAST_COMPILE) { | ||
config.emplace("NPUW_UNFOLD_IREQS", "YES"); | ||
} | ||
if (npudesc.has_value() && npudesc->compiler_dq) { | ||
config.emplace("NPUW_DQ_FULL", "NO"); | ||
// Specify NPUW DQ if Compiler DQ is not enabled | ||
if (!npudesc.has_value() || !npudesc->compiler_dq) { | ||
config.emplace("NPUW_DQ", "YES"); | ||
} | ||
return config; | ||
} | ||
|
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This looks like a sub-string search. Perhaps there is some OV utility to tokenize the list first?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Seems already simple enough
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
How is it a sub-string search? It looks more like a container. At least std::find works on it (this `auto` sometimes makes things harder to understand).