diff --git a/.gitignore b/.gitignore index de7a8b03..8929d79a 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,5 @@ scripts test_ext benchmarks/results + +deps diff --git a/.typos.toml b/.typos.toml index f1055cdd..f12eef96 100644 --- a/.typos.toml +++ b/.typos.toml @@ -3,3 +3,4 @@ numer = "numer" nd = "nd" Ba = "Ba" skipt = "skipt" +abd = "abd" diff --git a/Project.toml b/Project.toml index ff5f055c..ebdb6407 100644 --- a/Project.toml +++ b/Project.toml @@ -1,10 +1,11 @@ name = "LuxLib" uuid = "82251201-b29d-42c6-8e01-566dec8acb11" authors = ["Avik Pal and contributors"] -version = "1.2.2" +version = "1.3.0" [deps] ArrayInterface = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" +CEnum = "fa961155-64e5-5f13-b03f-caf6b980ea82" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" CpuId = "adafc99b-e345-5852-983c-f28acb93d879" @@ -28,6 +29,7 @@ SLEEFPirates = "476501e8-09a2-5ece-8869-fb82de89a1fa" Static = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +oneDNN_jll = "3523a63d-8698-5b6f-b2c2-68eaa6bde0f0" [weakdeps] AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" @@ -54,9 +56,10 @@ AMDGPU = "0.9.6, 1" AppleAccelerate = "0.4" ArrayInterface = "7.9" BLISBLAS = "0.1" +CEnum = "0.5" CUDA = "5.3.2" ChainRulesCore = "1.24" -Compat = "4.15.0" +Compat = "4.15" CpuId = "0.3" DispatchDoctor = "0.4.12" EnzymeCore = "0.7.7" @@ -83,3 +86,4 @@ Statistics = "1.10" Tracker = "0.2.34" cuDNN = "1.3" julia = "1.10" +oneDNN_jll = "3.5.3" diff --git a/generators/Project.toml b/generators/Project.toml new file mode 100644 index 00000000..a006f40c --- /dev/null +++ b/generators/Project.toml @@ -0,0 +1,7 @@ +[deps] +Clang = "40e3b903-d033-50b4-a0cc-940c62c95e31" +oneDNN_jll = "3523a63d-8698-5b6f-b2c2-68eaa6bde0f0" + +[compat] +Clang = "0.18" +oneDNN_jll = "3.5.3" diff --git a/generators/generator.toml b/generators/generator.toml new file mode 
100644 index 00000000..f3d318c1 --- /dev/null +++ b/generators/generator.toml @@ -0,0 +1,212 @@ +[general] +# it could also be an expression as long as `Meta.parse` can parse this string successfully. +# basically, it should be the `expression` in the following code: +# ccall((function_name, expression), returntype, (argtype1, ...), argvalue1, ...) +library_name = "libdnnl" + +# this entry allows you to specify different library names for different headers. +# in the following example: +# library_names = {"config.h" = "libclang_config", "libclang_p.*.h" = "libclang_patch"} +# those functions in the `config.h` will be generated as: +# ccall((function_name, libclang_config), returntype, (argtype1, ...), argvalue1, ...) +library_names = {} + +# output file path relative to the working directory +output_file_path = "../src/onednn/lib.jl" + +# if these are set, common file (types and constants) and API file (functions) will be separated +# this is for compatibility, so prologue and epilogue are not supported. +# output_api_file_path = "api.jl" +# output_common_file_path = "common.jl" + +# if this entry is not empty, the generator will print the code below to the `output_file_path`. +# module module_name +# +# end # module +module_name = "Lib" + +# if this entry is not empty, the generator will print the code below to the `output_file_path`. +# using jll_pkg_name +# export jll_pkg_name +jll_pkg_name = "oneDNN_jll" + +# for packages that have extra JLL package dependencies +jll_pkg_extra = [] + +# identifiers that starts with the string listed in this entry will be exported. +export_symbol_prefixes = ["CX", "clang_"] + +# the code in the following file will be copy-pasted to `output_file_path` before the generated code. +# this is often used for applying custom patches, e.g. adding missing definitions. +prologue_file_path = "prologue.jl" + +# the code in the following file will be copy-pasted to `output_file_path` after the generated code. 
+# this is often used for applying custom patches. +epilogue_file_path = "" + +# node with an id in the `output_ignorelist` will be ignored in the printing passes. +# this is very useful for custom editing. +output_ignorelist = [ + "CINDEX_EXPORTS", + "CINDEX_VERSION", + "CINDEX_VERSION_STRING", + "CINDEX_LINKAGE", + "CINDEX_DEPRECATED", + "LLVM_CLANG_C_STRICT_PROTOTYPES_BEGIN", + "LLVM_CLANG_C_STRICT_PROTOTYPES_END", + "LLVM_CLANG_C_EXTERN_C_BEGIN", + "LLVM_CLANG_C_EXTERN_C_END" +] + +# Julia's `@enum` do not allow duplicated values, so by default, C enums are translated to +# CEnum.jl's `@cenum`. +# if this entry is true, `@enum` is used and those duplicated enum constants are just commented. +use_julia_native_enum_type = false + +# use `@cenum` but do not print `using CEnum`. +# this is useful in the case of using `CEnum` directly in the source tree instead of using `CEnum` as a dependency +print_using_CEnum = false + +# Print enums directly as integers without @(c)enum wrapper +# Override above two options +print_enum_as_integer = false + +# use deterministic symbol instead of `gensym`-generated `var"##XXX"` +use_deterministic_symbol = true + +# by default, only those declarations in the local header file are processed. +# those declarations in the system headers will be treated specially and will be generated if necessary. +# if you'd like to generate all of the symbols in the system headers, please set this option to false. +is_local_header_only = true + +# set this option to false if you'd like to ignore the symbols(even if necessary) in the system headers. 
+generate_isystem_symbols = true + +# if this option is set to true, C code with a style of +# ```c +# typedef struct { +# int x; +# } my_struct; +# ``` +# will be generated as: +# ```julia +# struct my_struct +# x::Cint +# end +# ``` +# instead of +# ```julia +# struct var"##Ctag#NUM" +# x::Cint +# end +# const my_struct = var"##Ctag#NUM" +# ``` +smart_de_anonymize = true + +# if set to true, static functions will be ignored +skip_static_functions = false + +# EXPERIMENTAL +# if this option is set to true, those structs that are not necessary to be an +# immutable struct will be generated as a mutable struct. +# this option is default to false, do read the paragraph below before using this feature. +auto_mutability = false + +# add inner constructor `Foo() = new()` +auto_mutability_with_new = true + +# if you feel like certain structs should not be generated as mutable struct, please add them in the following list. +# for example, if a C function accepts a `Vector` of some type as its argument like: +# void foo(mutable_type *list, int n); +# when calling this function via `ccall`, passing a `Vector{mutable_type}(undef, n)` to the first +# argument will trigger a crash, the reason is mutable structs are not stored inline within a `Vector`, +# one should use `Ref{NTuple{n,mutable_type}}()` instead. +# this is not convenient and that's where the `auto_mutability_ignorelist` comes in. +auto_mutability_ignorelist = [] + +# opposite to `auto_mutability_ignorelist` and has a higher priority +auto_mutability_includelist = [] + +# if set to "raw", extract and dump raw c comment; +# if set to "doxygen", parse and format doxygen comment. +# note: by default, Clang only parses doxygen comment, pass `-fparse-all-comments` to Clang in order to parse non-doxygen comments. +extract_c_comment_style = "doxygen" + +# Pass a function to explicitly generate documentation. It will be called like +# `callback_documentation(node::ExprNode, doc::Vector{String})` if it is +# set. 
The `doc` argument will contain the docs parsed from the headers if +# `extract_c_comment_style` is set, otherwise it will be an empty vector. +# +# Do *not* set this in the TOML file, it should be set in the generator script +# to a function that takes in an ExprNode and returns a String[] (string +# vector). +# callback_documentation = "" + +# if set to true, single line comment will be printed as """comment""" instead of """\ncomment\n""" +fold_single_line_comment = false + +# if set to "outofline", documentation of struct fields will be collected at the "Fields" section of the struct +# if set to "inline", documentation of struct fields will go right above struct definition +struct_field_comment_style = "outofline" + +# if set to "outofline", documentation of enumerators will be collected at the "Enumerators" section of the enum +enumerator_comment_style = "outofline" + +# if set to true, C function prototype will be included in documentation +show_c_function_prototype = false + +[codegen] +# map C's bool to Julia's Bool instead of `Cuchar` a.k.a `UInt8`. +use_julia_bool = true + +# set this to true if the C routine always expects a NUL-terminated string. +# TODO: support filtering +always_NUL_terminated_string = true + +# generate strictly typed function +is_function_strictly_typed = false + +# if true, opaque pointers in function arguments will be translated to `Ptr{Cvoid}`. +opaque_func_arg_as_PtrCvoid = false + +# if true, opaque types are translated to `mutable struct` instead of `Cvoid`. +opaque_as_mutable_struct = true + +# if true, use Julia 1.5's new `@ccall` macro +use_ccall_macro = true + +# if true, variadic functions are wrapped with `@ccall` macro. Otherwise variadic functions are ignored. +wrap_variadic_function = false + +# generate getproperty/setproperty! 
methods for the types in the following list +field_access_method_list = [] + +# the generator will prefix the function argument names in the following list with a "_" to +# prevent the generated symbols from conflicting with the symbols defined and exported in Base. +function_argument_conflict_symbols = [] + +# emit constructors for all custom-layout structs like bitfield in the list, +# or set to `true` to do so for all such structs +add_record_constructors = [] + +[codegen.macro] +# it‘s highly recommended to set this entry to "basic". +# if you'd like to skip all of the macros, please set this entry to "disable". +# if you'd like to translate function-like macros to Julia, please set this entry to "aggressive". +macro_mode = "basic" + +# function-like macros in the following list will always be translated. +functionlike_macro_includelist = [ + "CINDEX_VERSION_ENCODE", +] + +# if true, the generator prints the following message as comments. +# "# Skipping MacroDefinition: ..." +add_comment_for_skipped_macro = true + +# if true, ignore any macros that is suffixed with "_H" or in the `ignore_header_guards_with_suffixes` list +ignore_header_guards = true +ignore_header_guards_with_suffixes = [] + +# if true, ignore those pure definition macros in the C code +ignore_pure_definition = true diff --git a/generators/prologue.jl b/generators/prologue.jl new file mode 100644 index 00000000..9defe08c --- /dev/null +++ b/generators/prologue.jl @@ -0,0 +1,6 @@ +using CEnum: @cenum + +const NULL = C_NULL + +# This file is automatically generated by Clang.jl. Don't edit it manually. If needed, +# look at the "generators/" directory and modify the relevant files there. 
diff --git a/generators/wrap.jl b/generators/wrap.jl new file mode 100644 index 00000000..72a366b7 --- /dev/null +++ b/generators/wrap.jl @@ -0,0 +1,62 @@ +using Clang.Generators +using oneDNN_jll + +cur_dir = pwd() + +cd(@__DIR__) + +include_dir = joinpath(oneDNN_jll.artifact_dir, "include") + +options = load_options(joinpath(@__DIR__, "generator.toml")) + +onednn_headers = [ + joinpath(include_dir, "dnnl.h"), + joinpath(include_dir, "dnnl_types.h"), + joinpath(include_dir, "dnnl_config.h"), + joinpath(include_dir, "dnnl_version.h") +] + +args = get_default_args() +push!(args, "-I$include_dir") + +ctx = create_context(onednn_headers, args, options) + +# run generator +build!(ctx, BUILDSTAGE_NO_PRINTING) + +function rewrite!(e::Expr) + # const DNNL_RUNTIME_SIZE_VAL = size_t(DNNL_RUNTIME_DIM_VAL) + if e.head == :const && e.args[1] isa Expr && e.args[1].head == :(=) && + e.args[1].args[1] == :DNNL_RUNTIME_SIZE_VAL && e.args[1].args[2] isa Expr && + e.args[1].args[2].head == :call && e.args[1].args[2].args[1] == :size_t && + e.args[1].args[2].args[2] == :DNNL_RUNTIME_DIM_VAL + e.args[1].args[2] = unsigned(typemin(Int64)) + return + end + # const DNNL_RUNTIME_DIM_VAL = INT64_MIN + if e.head == :const && e.args[1] isa Expr && e.args[1].head == :(=) && + e.args[1].args[1] == :DNNL_RUNTIME_DIM_VAL && e.args[1].args[2] == :INT64_MIN + e.args[1].args[2] = typemin(Int64) + return + end + # const DNNL_RUNTIME_S32_VAL = DNNL_RUNTIME_S32_VAL_REP + if e.head == :const && e.args[1] isa Expr && e.args[1].head == :(=) && + e.args[1].args[1] == :DNNL_RUNTIME_S32_VAL && + e.args[1].args[2] == :DNNL_RUNTIME_S32_VAL_REP + e.args[1].args[2] = 0 + return + end + return +end + +function rewrite!(dag::ExprDAG) + for node in get_nodes(dag), expr in get_exprs(node) + rewrite!(expr) + end +end + +rewrite!(ctx.dag) + +build!(ctx, BUILDSTAGE_PRINTING_ONLY) + +cd(cur_dir) diff --git a/src/LuxLib.jl b/src/LuxLib.jl index 05c77f60..210ced63 100644 --- a/src/LuxLib.jl +++ b/src/LuxLib.jl @@ -17,7 
+17,11 @@ const CRC = ChainRulesCore include("utils.jl") include("traits.jl") + +include("onednn/oneDNN.jl") + include("impl/Impl.jl") + include("api/API.jl") @compat(public, diff --git a/src/onednn/api.jl b/src/onednn/api.jl new file mode 100644 index 00000000..bdd2b59e --- /dev/null +++ b/src/onednn/api.jl @@ -0,0 +1,78 @@ +""" + engine() + +Create a new oneDNN engine. Currently creates a CPU engine. +""" +engine() = Engine() + +""" + global_engine() + +Fetch the global oneDNN engine created in LuxLib. If it doesn't exist, create it. +""" +function global_engine() + if !GLOBAL_ENGINE_INITIALIZED[] + GLOBAL_ENGINE[] = engine() + GLOBAL_ENGINE_INITIALIZED[] = true + end + return GLOBAL_ENGINE[] +end + +""" + get_math_mode() + +Get the current math mode for oneDNN. +""" +function get_math_mode() + mode = Ref{Lib.dnnl_fpmath_mode_t}() + @dnnlcall Lib.dnnl_get_default_fpmath_mode(mode) + dnnl_mode = unwrap_ref(mode) + return if dnnl_mode == Lib.dnnl_fpmath_mode_strict + :strict + elseif dnnl_mode == Lib.dnnl_fpmath_mode_bf16 + :bf16 + elseif dnnl_mode == Lib.dnnl_fpmath_mode_f16 + :f16 + elseif dnnl_mode == Lib.dnnl_fpmath_mode_tf32 + :tf32 + elseif dnnl_mode == Lib.dnnl_fpmath_mode_any + :fastest + else + error("Unknown math mode: $(dnnl_mode). This should not happen. Please open an \ + issue in `LuxLib.jl`.") + end +end + +""" + set_math_mode!(mode) + +Set the current math mode for oneDNN. `mode` must be one of the following: + + - `:strict` -- `Lib.dnnl_fpmath_mode_strict` + - `:bf16` -- `Lib.dnnl_fpmath_mode_bf16` + - `:f16` -- `Lib.dnnl_fpmath_mode_f16` + - `:tf32` -- `Lib.dnnl_fpmath_mode_tf32` + - `:fastest` -- `Lib.dnnl_fpmath_mode_any` + +For details, see [`Lib.dnnl_fpmath_mode_t`](@ref). + +See also [`get_math_mode`](@ref). 
+""" +function set_math_mode!(mode::Symbol) + dnnl_mode = if mode == :strict + Lib.dnnl_fpmath_mode_strict + elseif mode == :bf16 + Lib.dnnl_fpmath_mode_bf16 + elseif mode == :f16 + Lib.dnnl_fpmath_mode_f16 + elseif mode == :tf32 + Lib.dnnl_fpmath_mode_tf32 + elseif mode == :fastest + Lib.dnnl_fpmath_mode_any + else + error("Invalid math mode: $(mode). Valid modes are `:strict`, `:bf16`, `:f16`, \ + `:tf32`, and `:fastest`.") + end + @dnnlcall Lib.dnnl_set_default_fpmath_mode(dnnl_mode) + return nothing +end diff --git a/src/onednn/lib.jl b/src/onednn/lib.jl new file mode 100644 index 00000000..697b43df --- /dev/null +++ b/src/onednn/lib.jl @@ -0,0 +1,7742 @@ +module Lib + +using oneDNN_jll +export oneDNN_jll + +using CEnum: @cenum + +const NULL = C_NULL + +# This file is automatically generated by Clang.jl. Don't edit it manually. If needed, +# look at the "generators/" directory and modify the relevant files there. + +#! format: off + +""" + dnnl_status_t + +Status values returned by the library functions. 
+ +| Enumerator | Note | +| :--------------------------- | :----------------------------------------------------------------------- | +| dnnl\\_success | The operation was successful | +| dnnl\\_out\\_of\\_memory | The operation failed due to an out-of-memory condition | +| dnnl\\_invalid\\_arguments | The operation failed because of incorrect function arguments | +| dnnl\\_unimplemented | The operation failed because requested functionality is not implemented | +| dnnl\\_last\\_impl\\_reached | The last available implementation is reached | +| dnnl\\_runtime\\_error | Primitive or engine failed on execution | +| dnnl\\_not\\_required | Queried element is not required for given primitive | +| dnnl\\_invalid\\_graph | The graph is not legitimate | +| dnnl\\_invalid\\_graph\\_op | The operation is not legitimate according to op schema | +| dnnl\\_invalid\\_shape | The shape cannot be inferred or compiled | +| dnnl\\_invalid\\_data\\_type | The data type cannot be inferred or compiled | +""" +@cenum dnnl_status_t::UInt32 begin + dnnl_success = 0 + dnnl_out_of_memory = 1 + dnnl_invalid_arguments = 2 + dnnl_unimplemented = 3 + dnnl_last_impl_reached = 4 + dnnl_runtime_error = 5 + dnnl_not_required = 6 + dnnl_invalid_graph = 7 + dnnl_invalid_graph_op = 8 + dnnl_invalid_shape = 9 + dnnl_invalid_data_type = 10 +end + +""" + dnnl_data_type_t + +Data type specification + +| Enumerator | Note | +| :------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_data\\_type\\_undef | Undefined data type, used for empty memory descriptors. | +| dnnl\\_f16 | 16-bit/half-precision floating point. | +| dnnl\\_bf16 | non-standard 16-bit (bfloat16 w/ 7 bit mantissa) floating point. | +| dnnl\\_f32 | 32-bit/single-precision floating point. | +| dnnl\\_s32 | 32-bit signed integer. | +| dnnl\\_s8 | 8-bit signed integer. 
| +| dnnl\\_u8 | 8-bit unsigned integer. | +| dnnl\\_f64 | 64-bit/double-precision floating point. | +| dnnl\\_boolean | Boolean data type. Size is C++ implementation defined. | +| dnnl\\_f8\\_e5m2 | [OFP8 standard 8-bit floating-point](https://www.opencompute.org/documents/ocp-8-bit-floating-point-specification-ofp8-revision-1-0-2023-06-20-pdf) with a 5-bit exponent and a 2-bit mantissa. | +| dnnl\\_f8\\_e4m3 | [OFP8 standard 8-bit floating-point](https://www.opencompute.org/documents/ocp-8-bit-floating-point-specification-ofp8-revision-1-0-2023-06-20-pdf) with a 4-bit exponent and a 3-bit mantissa. | +| dnnl\\_s4 | 4-bit signed integer. | +| dnnl\\_u4 | 4-bit unsigned integer. | +| dnnl\\_data\\_type\\_max | Parameter to allow internal only data\\_types without undefined behavior. This parameter is chosen to be valid for so long as sizeof(int) >= 2. | +""" +@cenum dnnl_data_type_t::UInt32 begin + dnnl_data_type_undef = 0 + dnnl_f16 = 1 + dnnl_bf16 = 2 + dnnl_f32 = 3 + dnnl_s32 = 4 + dnnl_s8 = 5 + dnnl_u8 = 6 + dnnl_f64 = 7 + dnnl_boolean = 8 + dnnl_f8_e5m2 = 9 + dnnl_f8_e4m3 = 10 + dnnl_s4 = 11 + dnnl_u4 = 12 + dnnl_data_type_max = 32767 +end + +""" +A type to describe tensor dimension. +""" +const dnnl_dim_t = Int64 + +""" +A type to describe tensor dimensions. 
+""" +const dnnl_dims_t = NTuple{12, dnnl_dim_t} + +""" + dnnl_fpmath_mode_t + +Floating-point math mode + +| Enumerator | Note | +| :---------------------------- | :------------------------------------------------------------- | +| dnnl\\_fpmath\\_mode\\_strict | Default behavior, no downconversions allowed | +| dnnl\\_fpmath\\_mode\\_bf16 | Implicit f32->bf16 conversions allowed | +| dnnl\\_fpmath\\_mode\\_f16 | Implicit f32->f16 conversions allowed | +| dnnl\\_fpmath\\_mode\\_any | Implicit f32->f16, f32->tf32 or f32->bf16 conversions allowed | +| dnnl\\_fpmath\\_mode\\_tf32 | Implicit f32->tf32 conversions allowed | +""" +@cenum dnnl_fpmath_mode_t::UInt32 begin + dnnl_fpmath_mode_strict = 0 + dnnl_fpmath_mode_bf16 = 1 + dnnl_fpmath_mode_f16 = 2 + dnnl_fpmath_mode_any = 3 + dnnl_fpmath_mode_tf32 = 4 +end + +""" + dnnl_accumulation_mode_t + +Accumulation mode + +| Enumerator | Note | +| :----------------------------------- | :------------------------------------------------------------------------------------------------ | +| dnnl\\_accumulation\\_mode\\_strict | Default behavior, f32/f64 for floating point computation, s32 for integer | +| dnnl\\_accumulation\\_mode\\_relaxed | Same as strict but allows some partial accumulators to be rounded to src/dst datatype in memory. | +| dnnl\\_accumulation\\_mode\\_any | uses fastest implementation, could use src/dst datatype or wider datatype for accumulators | +| dnnl\\_accumulation\\_mode\\_s32 | use s32 accumulators during computation | +| dnnl\\_accumulation\\_mode\\_f32 | use f32 accumulators during computation | +| dnnl\\_accumulation\\_mode\\_f16 | use f16 accumulators during computation | +""" +@cenum dnnl_accumulation_mode_t::UInt32 begin + dnnl_accumulation_mode_strict = 0 + dnnl_accumulation_mode_relaxed = 1 + dnnl_accumulation_mode_any = 2 + dnnl_accumulation_mode_s32 = 3 + dnnl_accumulation_mode_f32 = 4 + dnnl_accumulation_mode_f16 = 5 +end + +""" + dnnl_engine_kind_t + +Kinds of engines. 
+ +| Enumerator | Note | +| :------------------ | :---------------------- | +| dnnl\\_any\\_engine | An unspecified engine. | +| dnnl\\_cpu | CPU engine. | +| dnnl\\_gpu | GPU engine. | +""" +@cenum dnnl_engine_kind_t::UInt32 begin + dnnl_any_engine = 0 + dnnl_cpu = 1 + dnnl_gpu = 2 +end + +""" +` dnnl_engine` + +An opaque structure to describe an engine. +""" +mutable struct dnnl_engine end + +""" +An engine handle. +""" +const dnnl_engine_t = Ptr{dnnl_engine} + +""" + dnnl_stream_flags_t + +Stream flags. + +| Enumerator | Note | +| :------------------------------- | :----------------------------- | +| dnnl\\_stream\\_out\\_of\\_order | Out-of-order execution. | +| dnnl\\_stream\\_default\\_flags | Default stream configuration. | +""" +@cenum dnnl_stream_flags_t::UInt32 begin + dnnl_stream_in_order = 1 + dnnl_stream_out_of_order = 2 + dnnl_stream_default_flags = 1 +end + +""" +` dnnl_stream` + +An opaque structure to describe an execution stream. +""" +mutable struct dnnl_stream end + +""" +An execution stream handle. +""" +const dnnl_stream_t = Ptr{dnnl_stream} + +""" +A constant execution stream handle. +""" +const const_dnnl_stream_t = Ptr{dnnl_stream} + +""" + dnnl_version_t + +Structure containing version information as per [Semantic Versioning](https://semver.org) + +| Field | Note | +| :------------ | :--------------------------------------- | +| major | Major version | +| minor | Minor version | +| patch | Patch version | +| hash | Git hash of the sources (may be absent) | +| cpu\\_runtime | CPU runtime | +| gpu\\_runtime | GPU runtime | +""" +struct dnnl_version_t + major::Cint + minor::Cint + patch::Cint + hash::Cstring + cpu_runtime::Cuint + gpu_runtime::Cuint +end + +""" + dnnl_engine_get_count(kind) + +Returns the number of engines of a particular kind. + +# Arguments +* `kind`: Kind of engines to count. +# Returns +Count of the engines. 
+""" +function dnnl_engine_get_count(kind) + @ccall libdnnl.dnnl_engine_get_count(kind::dnnl_engine_kind_t)::Csize_t +end + +""" + dnnl_engine_create(engine, kind, index) + +Creates an engine. + +# Arguments +* `engine`: Output engine. +* `kind`: Engine kind. +* `index`: Engine index that should be between 0 and the count of engines of the requested kind. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_engine_create(engine, kind, index) + @ccall libdnnl.dnnl_engine_create(engine::Ptr{dnnl_engine_t}, kind::dnnl_engine_kind_t, index::Csize_t)::dnnl_status_t +end + +""" + dnnl_engine_get_kind(engine, kind) + +Returns the kind of an engine. + +# Arguments +* `engine`: Engine to query. +* `kind`: Output engine kind. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_engine_get_kind(engine, kind) + @ccall libdnnl.dnnl_engine_get_kind(engine::dnnl_engine_t, kind::Ptr{dnnl_engine_kind_t})::dnnl_status_t +end + +""" + dnnl_engine_destroy(engine) + +Destroys an engine. + +# Arguments +* `engine`: Engine to destroy. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_engine_destroy(engine) + @ccall libdnnl.dnnl_engine_destroy(engine::dnnl_engine_t)::dnnl_status_t +end + +""" + dnnl_stream_create(stream, engine, flags) + +Creates an execution stream. + +# Arguments +* `stream`: Output execution stream. +* `engine`: Engine to create the execution stream on. +* `flags`: Stream behavior flags ( +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +# See also +[`dnnl_stream_flags_t`](@ref)). +""" +function dnnl_stream_create(stream, engine, flags) + @ccall libdnnl.dnnl_stream_create(stream::Ptr{dnnl_stream_t}, engine::dnnl_engine_t, flags::Cuint)::dnnl_status_t +end + +""" + dnnl_stream_get_engine(stream, engine) + +Returns the engine of a stream object. 
+ +# Arguments +* `stream`: Stream object. +* `engine`: Output engine on which the stream is created. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_stream_get_engine(stream, engine) + @ccall libdnnl.dnnl_stream_get_engine(stream::const_dnnl_stream_t, engine::Ptr{dnnl_engine_t})::dnnl_status_t +end + +""" + dnnl_stream_wait(stream) + +Waits for all primitives in the execution stream to finish computations. + +# Arguments +* `stream`: Execution stream. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_stream_wait(stream) + @ccall libdnnl.dnnl_stream_wait(stream::dnnl_stream_t)::dnnl_status_t +end + +""" + dnnl_stream_destroy(stream) + +Destroys an execution stream. + +# Arguments +* `stream`: Execution stream to destroy. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_stream_destroy(stream) + @ccall libdnnl.dnnl_stream_destroy(stream::dnnl_stream_t)::dnnl_status_t +end + +""" + dnnl_get_default_fpmath_mode(mode) + +Returns the floating-point math mode that will be used by default for all subsequently created primitives. + +# Arguments +* `mode`: Output FP math mode. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_get_default_fpmath_mode(mode) + @ccall libdnnl.dnnl_get_default_fpmath_mode(mode::Ptr{dnnl_fpmath_mode_t})::dnnl_status_t +end + +""" + dnnl_set_default_fpmath_mode(mode) + +Sets the floating-point math mode that will be used by default for all subsequently created primitives. + +# Arguments +* `mode`: FP math mode. The possible values are: #dnnl\\_fpmath\\_mode\\_strict, #dnnl\\_fpmath\\_mode\\_bf16, #dnnl\\_fpmath\\_mode\\_f16, #dnnl\\_fpmath\\_mode\\_tf32, #dnnl\\_fpmath\\_mode\\_any. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_set_default_fpmath_mode(mode) + @ccall libdnnl.dnnl_set_default_fpmath_mode(mode::dnnl_fpmath_mode_t)::dnnl_status_t +end + +""" + dnnl_set_verbose(level) + +Configures verbose output to stdout. + +!!! note + + Enabling verbose output affects performance. This setting overrides the ONEDNN\\_VERBOSE environment variable. + +# Arguments +* `level`: Verbosity level: - 0: no verbose output (default), - 1: primitive and graph information at execution, - 2: primitive and graph information at creation/compilation and execution. +# Returns +#dnnl\\_invalid\\_arguments/#dnnl::status::invalid\\_arguments if the `level` value is invalid, and #dnnl\\_success/#dnnl::status::success on success. +""" +function dnnl_set_verbose(level) + @ccall libdnnl.dnnl_set_verbose(level::Cint)::dnnl_status_t +end + +""" + dnnl_version() + +Returns library version information. + +# Returns +Pointer to a constant structure containing - major: major version number, - minor: minor version number, - patch: patch release number, - hash: git commit hash. +""" +function dnnl_version() + @ccall libdnnl.dnnl_version()::Ptr{dnnl_version_t} +end + +""" + dnnl_format_kind_t + +Memory format kind + +| Enumerator | Note | +| :---------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_format\\_kind\\_undef | Undefined memory format kind, used for empty memory descriptors. | +| dnnl\\_format\\_kind\\_any | A special format kind that indicates that the actual format will be selected by a primitive automatically. | +| dnnl\\_blocked | A tensor in a generic format described by the stride and blocking values in each dimension. | +| dnnl\\_format\\_kind\\_opaque | A special format kind that indicates that tensor format is opaque. | +| dnnl\\_format\\_kind\\_max | Parameter to allow internal only format kinds without undefined behavior. 
This parameter is chosen to be valid for so long as sizeof(int) >= 2. | +""" +@cenum dnnl_format_kind_t::UInt32 begin + dnnl_format_kind_undef = 0 + dnnl_format_kind_any = 1 + dnnl_blocked = 2 + dnnl_format_kind_opaque = 3 + dnnl_format_kind_max = 32767 +end + +""" + dnnl_format_tag_t + +Memory format tag specification. + +oneDNN formats describe physical data layout. The physical layout is described as a sequence of the dimensions as they are laid out in the memory (from the outer-most to the inner-most). Note that this order doesn't affect the logical order of the dimensions that is kept in the `dims` field of the [`dnnl_memory_desc_t`](@ref) structure. The logical order of the dimensions is specified by the primitive that uses the tensor. + +For example, CNN 5D tensor always has its logical dimensions in the order `(batch, channels, depth, height, width)`, while the physical layout might be `NCDHW` (corresponds to #dnnl\\_ncdhw format tag) or `NDHWC` (corresponds to #dnnl\\_ndhwc format tag). + +~~~cpp int batch = 2, channels = 16, depth = 13, height = 13, width = 13; + +int ndims = 5; // 5D tensor [`dnnl_dims_t`](@ref) dims = {batch, channels, depth, height, width}; [`dnnl_memory_desc_t`](@ref) data\\_in\\_ncdhw; [`dnnl_memory_desc_create_with_tag`](@ref)( &data\\_in\\_ncdhw, 5, dims, dnnl\\_f32, dnnl\\_ncdhw); + +// note that in both cases dims passed are the same [`dnnl_memory_desc_t`](@ref) data\\_in\\_ndhwc; [`dnnl_memory_desc_create_with_tag`](@ref)( &data\\_in\\_ndhwc, 5, dims, dnnl\\_f32, dnnl\\_ndhwc); + +[`dnnl_memory_desc_destroy`](@ref)(data\\_in\\_ncdhw); [`dnnl_memory_desc_destroy`](@ref)(data\\_in\\_ndhwc); ~~~ + +Memory format tags can be further divided into two categories: - Domain-agnostic names, i.e. names the do not depend on the tensor usage in the specific primitive. These names use letters from `a` to `l` to denote logical dimension from 1 to 12, and form the order in which the dimensions are laid in memory. 
For instance, #dnnl\\_ab is used to denote 2D tensor where the second logical dimension (aka `b`) is the innermost, i.e. has stride = 1, and the first logical dimension (`a`) laid out in memory with stride equal to the size of second dimension. On the other hand, #dnnl\\_ba is just transposed version of the same tensor: the first dimension (`a`) becomes the innermost one. - Domain-specific names, i.e. names that make sense only in the context of a certain domain, such as CNN. This names are just aliases to the corresponding domain-agnostic tags and used mostly for the convenience. For example, #dnnl\\_nc is used to denote 2D CNN activations tensor memory format, where channels are the innermost dimension and batch is an outermost one. Moreover, #dnnl\\_nc is just an alias to #dnnl\\_ab, since for oneDNN CNN primitives the logical dimensions of activations tensors come in order: batch, channels, spatial. In other words, batch corresponds to the first logical dimension (`a`), channels correspond to the second one (`b`). + +The following domain-specific notation applies to memory format tags: - `'n'` denotes the mini-batch dimension - `'c'` denotes a channels dimension - When there are multiple channel dimensions (for example, in convolution weights tensor), `'i'` and `'o'` denote dimensions of input and output channels - `'d',` `'h',` and `'w'` denote spatial depth, height, and width respectively + +Upper-case letters indicate that the data is laid out in blocks for a particular dimension. In such cases, the format name contains both upper- and lower-case letters for that dimension with a lower-case letter preceded by the block size. For example: #dnnl\\_nChw8c describes a format where the outermost dimension is mini-batch, followed by the channel block number, followed by the spatial height and width, and finally followed by 8-element channel blocks. 
+ +| Enumerator | Note | +| :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_format\\_tag\\_undef | Undefined memory format tag | +| dnnl\\_format\\_tag\\_any | Undefined memory format tag. The primitive selects a format automatically. | +| dnnl\\_a | plain 1D tensor | +| dnnl\\_ab | plain 2D tensor | +| dnnl\\_abc | plain 3D tensor | +| dnnl\\_abcd | plain 4D tensor | +| dnnl\\_abcde | plain 5D tensor | +| dnnl\\_abcdef | plain 6D tensor | +| dnnl\\_abcdefg | plain 7D tensor | +| dnnl\\_abcdefgh | plain 8D tensor | +| dnnl\\_abcdefghi | plain 9D tensor | +| dnnl\\_abcdefghij | plain 10D tensor | +| dnnl\\_abcdefghijk | plain 11D tensor | +| dnnl\\_abcdefghijkl | plain 12D tensor | +| dnnl\\_ba | permuted 2D tensor | +| dnnl\\_acb | permuted 3D tensor | +| dnnl\\_bac | | +| dnnl\\_bca | | +| dnnl\\_cab | | +| dnnl\\_cba | | +| dnnl\\_abdc | permuted 4D tensor | +| dnnl\\_acbd | | +| dnnl\\_acdb | | +| dnnl\\_adbc | | +| dnnl\\_adcb | | +| dnnl\\_bacd | | +| dnnl\\_bcda | | +| dnnl\\_cdab | | +| dnnl\\_cdba | | +| dnnl\\_dcab | | +| dnnl\\_abced | permuted 5D tensor | +| dnnl\\_abdec | | +| dnnl\\_acbde | | +| dnnl\\_acdeb | | +| dnnl\\_adecb | | +| dnnl\\_bacde | | +| dnnl\\_bcdea | | +| dnnl\\_cdeab | | +| dnnl\\_cdeba | | +| dnnl\\_decab | | +| dnnl\\_abcdfe | permuted 6D tensor | +| dnnl\\_abdefc | | +| dnnl\\_abdfce | | +| dnnl\\_acbdef | | +| dnnl\\_adefcb | | +| dnnl\\_defcab | | +| dnnl\\_abcdegf | permuted 7D tensor | +| dnnl\\_abcdefhg | permuted 8D tensor | +| dnnl\\_abcdefgih | permuted 9D tensor | +| dnnl\\_abcdefghji | permuted 10D tensor | +| dnnl\\_abcdefghikj | permuted 11D tensor | +| dnnl\\_abcdefghijlk | permuted 12D tensor | +| dnnl\\_aBc16b | 3D tensor blocked 
by 2nd dimension with block size 16 | +| dnnl\\_ABc16b16a | | +| dnnl\\_Abc4a | | +| dnnl\\_aBc32b | 3D tensor blocked by 2nd dimension with block size 32 | +| dnnl\\_aBc4b | 3D tensor blocked by 2nd dimension with block size 4 | +| dnnl\\_ABc4b16a4b | | +| dnnl\\_ABc2b8a4b | | +| dnnl\\_ABc16b16a4b | | +| dnnl\\_ABc16b16a2b | | +| dnnl\\_ABc4b4a | | +| dnnl\\_ABc8a16b2a | | +| dnnl\\_ABc8a8b | | +| dnnl\\_ABc8a4b | | +| dnnl\\_aBc8b | 3D tensor blocked by 2nd dimension with block size 8 | +| dnnl\\_ABc8b16a2b | | +| dnnl\\_BAc8a16b2a | | +| dnnl\\_ABc8b8a | | +| dnnl\\_Abcd16a | | +| dnnl\\_Abcd8a | | +| dnnl\\_ABcd16a16b | | +| dnnl\\_Abcd32a | | +| dnnl\\_ABcd32a32b | | +| dnnl\\_aBcd16b | 4D tensor blocked by 2nd dimension with block size 16 | +| dnnl\\_ABcd16b16a | | +| dnnl\\_aBCd16b16c | | +| dnnl\\_aBCd16c16b | | +| dnnl\\_Abcd4a | | +| dnnl\\_aBcd32b | 4D tensor blocked by 2nd dimension with block size 32 | +| dnnl\\_aBcd4b | 4D tensor blocked by 2nd dimension with block size 4 | +| dnnl\\_ABcd4b16a4b | | +| dnnl\\_ABcd16b16a4b | | +| dnnl\\_ABcd16b16a2b | | +| dnnl\\_ABcd4b4a | | +| dnnl\\_ABcd4a4b | | +| dnnl\\_aBCd2c4b2c | | +| dnnl\\_aBCd4b8c2b | | +| dnnl\\_aBCd4c16b4c | | +| dnnl\\_aBCd2c8b4c | | +| dnnl\\_aBCd16c16b4c | | +| dnnl\\_aBCd16c16b2c | | +| dnnl\\_aBCd4c4b | | +| dnnl\\_aBCd4b4c | | +| dnnl\\_ABcd8a16b2a | | +| dnnl\\_ABcd2b8a4b | | +| dnnl\\_ABcd8a8b | | +| dnnl\\_ABcd8a4b | | +| dnnl\\_aBcd8b | 4D tensor blocked by 2nd dimension with block size 8 | +| dnnl\\_aBCd4c8b2c | | +| dnnl\\_ABcd8b16a2b | | +| dnnl\\_aBCd8b16c2b | | +| dnnl\\_BAcd8a16b2a | | +| dnnl\\_ABcd8b8a | 4D tensor blocked by 1st and 2nd dimension with block size 8 | +| dnnl\\_aBCd8b8c | | +| dnnl\\_aBCd8b4c | | +| dnnl\\_aBCd8c16b2c | | +| dnnl\\_ABcde8a16b2a | | +| dnnl\\_aCBd8b16c2b | | +| dnnl\\_aBCd8c8b | | +| dnnl\\_Abcde16a | | +| dnnl\\_Abcde32a | | +| dnnl\\_ABcde16a16b | | +| dnnl\\_BAcde8a16b2a | | +| dnnl\\_aBCd2b4c2b | 4D tensor blocked by 3rd dimension with 
block size 4 | +| dnnl\\_ABcde4b16a4b | 5D tensor blocked by 1st dimension with block size 16 | +| dnnl\\_ABcde2b8a4b | 5D tensor blocked by 1st dimension with block size 8 | +| dnnl\\_aBcde16b | 5D tensor blocked by 2nd dimension with block size 16 | +| dnnl\\_ABcde16b16a | | +| dnnl\\_aBCde16b16c | | +| dnnl\\_aBCde16c16b | | +| dnnl\\_aBCde2c8b4c | | +| dnnl\\_Abcde4a | | +| dnnl\\_aBcde32b | 5D tensor blocked by 2nd dimension with block size 32 | +| dnnl\\_aBcde4b | 5D tensor blocked by 2nd dimension with block size 4 | +| dnnl\\_ABcde4b4a | | +| dnnl\\_ABcde4a4b | | +| dnnl\\_aBCde4b4c | | +| dnnl\\_aBCde2c4b2c | | +| dnnl\\_aBCde4b8c2b | | +| dnnl\\_aBCde4c16b4c | | +| dnnl\\_aBCde16c16b4c | | +| dnnl\\_aBCde16c16b2c | | +| dnnl\\_aBCde4c4b | | +| dnnl\\_Abcde8a | | +| dnnl\\_ABcde8a8b | | +| dnnl\\_ABcde8a4b | | +| dnnl\\_BAcde16b16a | | +| dnnl\\_aBcde8b | 5D tensor blocked by 2nd dimension with block size 8 | +| dnnl\\_ABcde8b16a2b | | +| dnnl\\_aBCde8b16c2b | | +| dnnl\\_aBCde4c8b2c | | +| dnnl\\_aCBde8b16c2b | | +| dnnl\\_ABcde8b8a | | +| dnnl\\_ABcde32a32b | | +| dnnl\\_aBCde8b8c | | +| dnnl\\_aBCde8b4c | | +| dnnl\\_ABc4a8b8a4b | | +| dnnl\\_ABcd4a8b8a4b | | +| dnnl\\_ABcde4a8b8a4b | | +| dnnl\\_BAc4b8a8b4a | | +| dnnl\\_BAcd4b8a8b4a | | +| dnnl\\_BAcde4b8a8b4a | | +| dnnl\\_ABcd2a8b8a2b | | +| dnnl\\_aBCd4b8c8b4c | | +| dnnl\\_aBCde4b8c8b4c | | +| dnnl\\_aBCde2b8c8b2c | | +| dnnl\\_aBCde8c16b2c | | +| dnnl\\_aBCde8c8b | | +| dnnl\\_aBCde2b4c2b | 5D tensor blocked by 3rd dimension with block size 4 | +| dnnl\\_aBcdef16b | 6D tensor blocked by 2nd dimension with block size 16 | +| dnnl\\_aBCdef16b16c | | +| dnnl\\_aBCdef16c16b | | +| dnnl\\_aBCdef4c16b4c | | +| dnnl\\_aBCdef2c8b4c | 6D tensor blocked by 2nd dimension with block size 8 | +| dnnl\\_aBCdef4c8b2c | | +| dnnl\\_aBCdef2b4c2b | 6D tensor blocked by 3rd dimension with block size 4 | +| dnnl\\_aBcdef4b | 6D tensor blocked by 2nd dimension with block size 4 | +| dnnl\\_aBCdef4c4b | | +| 
dnnl\\_aBCdef4b4c | | +| dnnl\\_aBCdef2c4b2c | | +| dnnl\\_aBCdef4b8c2b | | +| dnnl\\_aBCdef8b8c | | +| dnnl\\_aBCdef8b4c | | +| dnnl\\_aBCdef8c16b2c | | +| dnnl\\_aBCdef4b8c8b4c | | +| dnnl\\_aBCdef8b16c2b | | +| dnnl\\_aCBdef8b16c2b | | +| dnnl\\_aBCdef8c8b | | +| dnnl\\_aBdc16b | | +| dnnl\\_aBdC16b2c | | +| dnnl\\_aBdC16b4c | | +| dnnl\\_aBdc4b | | +| dnnl\\_aBdc8b | | +| dnnl\\_aBdec16b | | +| dnnl\\_aBdeC16b2c | | +| dnnl\\_aBdeC16b4c | | +| dnnl\\_aBdec32b | | +| dnnl\\_aBdec4b | | +| dnnl\\_aBdec8b | | +| dnnl\\_aBdefc16b | | +| dnnl\\_aBdefC16b2c | | +| dnnl\\_aCBdef16c16b | | +| dnnl\\_aBdefc4b | | +| dnnl\\_aBdefc8b | | +| dnnl\\_Abcdef16a | | +| dnnl\\_Abcdef32a | | +| dnnl\\_aBedc16b | | +| dnnl\\_Acb16a | | +| dnnl\\_AcB16a2b | | +| dnnl\\_AcB16a4b | | +| dnnl\\_Acb4a | | +| dnnl\\_Acb8a | | +| dnnl\\_aCBd16b16c | | +| dnnl\\_aCBd16c16b | | +| dnnl\\_aCBde16b16c | | +| dnnl\\_aCBde16c16b | | +| dnnl\\_Acdb16a | | +| dnnl\\_AcdB16a2b | | +| dnnl\\_AcdB16a4b | | +| dnnl\\_Acdb32a | | +| dnnl\\_Acdb4a | | +| dnnl\\_Acdb8a | | +| dnnl\\_Acdeb16a | | +| dnnl\\_AcdeB16a2b | | +| dnnl\\_Acdeb4a | | +| dnnl\\_Acdeb8a | | +| dnnl\\_Adcb16a | | +| dnnl\\_BAc16a16b | | +| dnnl\\_BAc16b16a | | +| dnnl\\_BAcd16a16b | | +| dnnl\\_BAcd16b16a | | +| dnnl\\_aCBd4c8b8c4b | | +| dnnl\\_aCBde4c8b8c4b | | +| dnnl\\_aCBdef4c8b8c4b | | +| dnnl\\_BAcde16a16b | | +| dnnl\\_aCBdef16b16c | | +| dnnl\\_ABc16b32a | | +| dnnl\\_ABc16b64a | | +| dnnl\\_ABc4b32a4b | | +| dnnl\\_ABc4b64a4b | | +| dnnl\\_ABc8b32a2b | | +| dnnl\\_ABc8b64a2b | | +| dnnl\\_AB16b16a | | +| dnnl\\_AB16b32a | | +| dnnl\\_AB16b64a | | +| dnnl\\_AB8b16a2b | | +| dnnl\\_AB8b32a2b | | +| dnnl\\_AB8b64a2b | | +| dnnl\\_AB4b16a4b | | +| dnnl\\_AB4b32a4b | | +| dnnl\\_AB4b64a4b | | +| dnnl\\_AB16b16a4b | | +| dnnl\\_ABcd16b32a | | +| dnnl\\_ABcd16b64a | | +| dnnl\\_ABcd4b32a4b | | +| dnnl\\_ABcd4b64a4b | | +| dnnl\\_ABcd8b32a2b | | +| dnnl\\_ABcd8b64a2b | | +| dnnl\\_ABcde4b32a4b | | +| dnnl\\_ABcde4b64a4b | | +| 
dnnl\\_ABcde16b16a4b | | +| dnnl\\_ABcde16b16a2b | | +| dnnl\\_ABcde16b32a | | +| dnnl\\_ABcde16b64a | | +| dnnl\\_ABcde8b32a2b | | +| dnnl\\_ABcde8b64a2b | | +| dnnl\\_aBCdef16c16b4c | | +| dnnl\\_aBCdef16c16b2c | | +| dnnl\\_AB32a32b8a4b | | +| dnnl\\_AB8a4b | | +| dnnl\\_AB32a32b8a2b | | +| dnnl\\_AB8a2b | | +| dnnl\\_abDc32d | | +| dnnl\\_abDC32d4c | | +| dnnl\\_abdEc32e | | +| dnnl\\_abdEC32e2c | | +| dnnl\\_abdEC32e4c | | +| dnnl\\_aBdefC16b4c | | +| dnnl\\_AcdeB16a4b | | +| dnnl\\_ABcd16a16b2a | | +| dnnl\\_ABc16a16b2a | | +| dnnl\\_aBCd16b16c2b | | +| dnnl\\_aBCde16b16c2b | | +| dnnl\\_Acb32a | | +| dnnl\\_AcB32a2b | | +| dnnl\\_AcB32a4b | | +| dnnl\\_Acb48a | | +| dnnl\\_AcB48a2b | | +| dnnl\\_AcB48a4b | | +| dnnl\\_Acb64a | | +| dnnl\\_AcB64a2b | | +| dnnl\\_AcB64a4b | | +| dnnl\\_cBa2b | | +| dnnl\\_cBa4b | | +| dnnl\\_aBdc32b | | +| dnnl\\_aBdC32b2c | | +| dnnl\\_aBdC32b4c | | +| dnnl\\_aBdc48b | | +| dnnl\\_aBdC48b2c | | +| dnnl\\_aBdC48b4c | | +| dnnl\\_aBdc64b | | +| dnnl\\_aBdC64b2c | | +| dnnl\\_aBdC64b4c | | +| dnnl\\_adCb2c | | +| dnnl\\_adCb4c | | +| dnnl\\_AcdB32a2b | | +| dnnl\\_AcdB32a4b | | +| dnnl\\_Acdb48a | | +| dnnl\\_AcdB48a2b | | +| dnnl\\_AcdB48a4b | | +| dnnl\\_Acdb64a | | +| dnnl\\_AcdB64a2b | | +| dnnl\\_AcdB64a4b | | +| dnnl\\_cdBa2b | | +| dnnl\\_cdBa4b | | +| dnnl\\_aBdeC32b2c | | +| dnnl\\_aBdeC32b4c | | +| dnnl\\_aBdec48b | | +| dnnl\\_aBdeC48b2c | | +| dnnl\\_aBdeC48b4c | | +| dnnl\\_aBdec64b | | +| dnnl\\_aBdeC64b2c | | +| dnnl\\_aBdeC64b4c | | +| dnnl\\_adeCb2c | | +| dnnl\\_adeCb4c | | +| dnnl\\_Acdeb32a | | +| dnnl\\_AcdeB32a2b | | +| dnnl\\_AcdeB32a4b | | +| dnnl\\_Acdeb48a | | +| dnnl\\_AcdeB48a2b | | +| dnnl\\_AcdeB48a4b | | +| dnnl\\_Acdeb64a | | +| dnnl\\_AcdeB64a2b | | +| dnnl\\_AcdeB64a4b | | +| dnnl\\_cdeBa2b | | +| dnnl\\_cdeBa4b | | +| dnnl\\_aBdefc32b | | +| dnnl\\_aBdefC32b2c | | +| dnnl\\_aBdefC32b4c | | +| dnnl\\_aBdefc48b | | +| dnnl\\_aBdefC48b2c | | +| dnnl\\_aBdefC48b4c | | +| dnnl\\_aBdefc64b | | +| 
dnnl\\_aBdefC64b2c | | +| dnnl\\_aBdefC64b4c | | +| dnnl\\_adefCb2c | | +| dnnl\\_adefCb4c | | +| dnnl\\_AB16b32a4b | | +| dnnl\\_AB16b48a4b | | +| dnnl\\_AB16b64a4b | | +| dnnl\\_AB16b16a2b | | +| dnnl\\_AB16b32a2b | | +| dnnl\\_AB16b48a2b | | +| dnnl\\_AB16b64a2b | | +| dnnl\\_ABc16b32a4b | | +| dnnl\\_ABc16b48a4b | | +| dnnl\\_ABc16b64a4b | | +| dnnl\\_ABc16b32a2b | | +| dnnl\\_ABc16b48a2b | | +| dnnl\\_ABc16b64a2b | | +| dnnl\\_ABcd16b32a4b | | +| dnnl\\_ABcd16b48a4b | | +| dnnl\\_ABcd16b64a4b | | +| dnnl\\_ABcd16b32a2b | | +| dnnl\\_ABcd16b48a2b | | +| dnnl\\_ABcd16b64a2b | | +| dnnl\\_ABcde16b32a4b | | +| dnnl\\_ABcde16b48a4b | | +| dnnl\\_ABcde16b64a4b | | +| dnnl\\_ABcde16b32a2b | | +| dnnl\\_ABcde16b48a2b | | +| dnnl\\_ABcde16b64a2b | | +| dnnl\\_ABc32a16b | | +| dnnl\\_ABcd32a16b | | +| dnnl\\_ABcde32a16b | | +| dnnl\\_AB48a16b | | +| dnnl\\_AB48a32b | | +| dnnl\\_ABc40a16b | | +| dnnl\\_ABc40a32b | | +| dnnl\\_aBC48b16c | | +| dnnl\\_aBC48b32c | | +| dnnl\\_ABcd40a16b | | +| dnnl\\_ABcd40a32b | | +| dnnl\\_abCd32c | | +| dnnl\\_abdCe32c | | +| dnnl\\_abdCE32c2e | | +| dnnl\\_BA16a16b2a | | +| dnnl\\_BA16a32b2a | | +| dnnl\\_BA16a48b2a | | +| dnnl\\_BA16a64b2a | | +| dnnl\\_BA16a16b4a | | +| dnnl\\_BA16a32b4a | | +| dnnl\\_BA16a48b4a | | +| dnnl\\_BA16a64b4a | | +| dnnl\\_ABcd8a2b | | +| dnnl\\_aBdeC16c16b2c | | +| dnnl\\_aBdeC16c16b4c | | +| dnnl\\_aBdefC16c16b2c | | +| dnnl\\_AcB16b16a2b | | +| dnnl\\_AcB16b16a4b | | +| dnnl\\_AcdB16b16a2b | | +| dnnl\\_AcdB16b16a4b | | +| dnnl\\_AcdeB16b16a2b | | +| dnnl\\_aBdefC16c16b4c | | +| dnnl\\_AcdeB16b16a4b | | +| dnnl\\_AcB16b32a2b | | +| dnnl\\_AcB16b32a4b | | +| dnnl\\_AcB16b48a2b | | +| dnnl\\_AcB16b48a4b | | +| dnnl\\_AcB16b64a2b | | +| dnnl\\_AcB16b64a4b | | +| dnnl\\_aBdC16c16b2c | | +| dnnl\\_aBdC16c16b4c | | +| dnnl\\_aBdC16c32b2c | | +| dnnl\\_aBdC16c32b4c | | +| dnnl\\_aBdC16c48b2c | | +| dnnl\\_aBdC16c48b4c | | +| dnnl\\_aBdC16c64b2c | | +| dnnl\\_aBdC16c64b4c | | +| dnnl\\_AcdB16b32a2b | | +| 
dnnl\\_AcdB16b32a4b | | +| dnnl\\_AcdB16b48a2b | | +| dnnl\\_AcdB16b48a4b | | +| dnnl\\_AcdB16b64a2b | | +| dnnl\\_AcdB16b64a4b | | +| dnnl\\_aBdeC16c32b2c | | +| dnnl\\_aBdeC16c32b4c | | +| dnnl\\_aBdeC16c48b2c | | +| dnnl\\_aBdeC16c48b4c | | +| dnnl\\_aBdeC16c64b2c | | +| dnnl\\_aBdeC16c64b4c | | +| dnnl\\_AcdeB16b32a2b | | +| dnnl\\_AcdeB16b32a4b | | +| dnnl\\_AcdeB16b48a2b | | +| dnnl\\_AcdeB16b48a4b | | +| dnnl\\_AcdeB16b64a2b | | +| dnnl\\_AcdeB16b64a4b | | +| dnnl\\_aBdefC16c32b2c | | +| dnnl\\_aBdefC16c32b4c | | +| dnnl\\_aBdefC16c48b2c | | +| dnnl\\_aBdefC16c48b4c | | +| dnnl\\_aBdefC16c64b2c | | +| dnnl\\_aBdefC16c64b4c | | +| dnnl\\_decbA16a | | +| dnnl\\_ABc4a2b | | +| dnnl\\_ABc8a2b | | +| dnnl\\_aBCd8b2c | | +| dnnl\\_ABcde4a2b | | +| dnnl\\_ABcde8a2b | | +| dnnl\\_ABcde40a16b | | +| dnnl\\_ABcde40a32b | | +| dnnl\\_aBCde8b2c | | +| dnnl\\_ABcde4a8b8a2b | | +| dnnl\\_ABcd4a8b8a2b | | +| dnnl\\_ABc4a8b8a2b | | +| dnnl\\_aBCdef4b8c8b2c | | +| dnnl\\_aBCde4b8c8b2c | | +| dnnl\\_aBCd4b8c8b2c | | +| dnnl\\_BAcde4b8a8b2a | | +| dnnl\\_BAcd4b8a8b2a | | +| dnnl\\_BAc4b8a8b2a | | +| dnnl\\_aCBdef4c8b8c2b | | +| dnnl\\_aCBde4c8b8c2b | | +| dnnl\\_aCBd4c8b8c2b | | +| dnnl\\_aBCdef8b2c | | +| dnnl\\_AB32a16b | | +| dnnl\\_AB32a32b | | +| dnnl\\_BA4b8a8b2a | | +| dnnl\\_BA4b8a8b4a | | +| dnnl\\_aBC32b16c | | +| dnnl\\_aBC32b32c | | +| dnnl\\_aCB4c8b8c2b | | +| dnnl\\_aCB4c8b8c4b | | +| dnnl\\_ABcd4a2b | | +| dnnl\\_ABc2b8a16b4a | | +| dnnl\\_ABcd2b8a16b4a | | +| dnnl\\_ABcde2b8a16b4a | | +| dnnl\\_ABc2a8b16a4b | | +| dnnl\\_ABc2a8b16a2b | | +| dnnl\\_ABc2b32a8b | | +| dnnl\\_ABcd2a8b16a4b | | +| dnnl\\_ABcd2a8b16a2b | | +| dnnl\\_aCBd2c8b16c2b | | +| dnnl\\_ABcd2b32a8b | | +| dnnl\\_aBCd2c8b16c2b | | +| dnnl\\_ABcde2a8b16a4b | | +| dnnl\\_ABcde2a8b16a2b | | +| dnnl\\_aCBde2c8b16c2b | | +| dnnl\\_ABcde2b32a8b | | +| dnnl\\_aBC2b8c16b2c | | +| dnnl\\_aBCd2b8c16b2c | | +| dnnl\\_aBCde2b8c16b2c | | +| dnnl\\_aBCdef2b8c16b2c | | +| dnnl\\_BAcde2b8a16b4a | | +| 
dnnl\\_BAcd2b8a16b4a | | +| dnnl\\_BAc2b8a16b4a | | +| dnnl\\_BAcde2b8a16b2a | | +| dnnl\\_BAcd2b8a16b2a | | +| dnnl\\_BAc2b8a16b2a | | +| dnnl\\_aBCde2c8b16c2b | | +| dnnl\\_aBCdef2c8b16c2b | | +| dnnl\\_aCBdef2c8b16c2b | | +| dnnl\\_aBCd2b8c16b4c | | +| dnnl\\_aBCde2b8c16b4c | | +| dnnl\\_BA4b8a16b2a | | +| dnnl\\_BA4b8a16b4a | | +| dnnl\\_aCB4c8b16c2b | | +| dnnl\\_aCB4c8b16c4b | | +| dnnl\\_BA16a16b | | +| dnnl\\_BA16a32b | | +| dnnl\\_BA16a48b | | +| dnnl\\_BA16a64b | | +| dnnl\\_aCB16c2b | | +| dnnl\\_aCB16c4b | | +| dnnl\\_BA16b2a | | +| dnnl\\_BA16b4a | | +| dnnl\\_aBC16b16c | | +| dnnl\\_aBC16b32c | | +| dnnl\\_AB16a16b | | +| dnnl\\_AB16a32b | | +| dnnl\\_ABcde16a16b2a | | +| dnnl\\_aBCdef16b16c2b | | +| dnnl\\_Acedb16a | | +| dnnl\\_aBdfec16b | | +| dnnl\\_abdEC64e2c | | +| dnnl\\_abdEC64e4c | | +| dnnl\\_aCB16b16c | | +| dnnl\\_aCB16b32c | | +| dnnl\\_aCB16b48c | | +| dnnl\\_aCB16b64c | | +| dnnl\\_aCB16b16c2b | | +| dnnl\\_aCB16b32c2b | | +| dnnl\\_aCB16b48c2b | | +| dnnl\\_aCB16b64c2b | | +| dnnl\\_aCB16b16c4b | | +| dnnl\\_aCB16b32c4b | | +| dnnl\\_aCB16b48c4b | | +| dnnl\\_aCB16b64c4b | | +| dnnl\\_abCd4c | | +| dnnl\\_abCde4c | | +| dnnl\\_abCdef4c | | +| dnnl\\_abCde32c | | +| dnnl\\_abCdef32c | | +| dnnl\\_ABcd16a32b | | +| dnnl\\_decbA8a | | +| dnnl\\_aCdefB16b32c2b | | +| dnnl\\_aCdefB16b32c4b | | +| dnnl\\_aCdefB16b48c2b | | +| dnnl\\_aCdefB16b48c4b | | +| dnnl\\_aCdefB16b64c2b | | +| dnnl\\_aCdefB16b64c4b | | +| dnnl\\_BcdeA16a32b2a | | +| dnnl\\_BcdeA16a32b4a | | +| dnnl\\_BcdeA16a48b2a | | +| dnnl\\_BcdeA16a48b4a | | +| dnnl\\_BcdeA16a64b2a | | +| dnnl\\_BcdeA16a64b4a | | +| dnnl\\_aCdefb32c | | +| dnnl\\_aCdefB32c2b | | +| dnnl\\_aCdefB32c4b | | +| dnnl\\_aCdefb48c | | +| dnnl\\_aCdefB48c2b | | +| dnnl\\_aCdefB48c4b | | +| dnnl\\_aCdefb64c | | +| dnnl\\_aCdefB64c2b | | +| dnnl\\_aCdefB64c4b | | +| dnnl\\_Bcdea32b | | +| dnnl\\_BcdeA32b2a | | +| dnnl\\_BcdeA32b4a | | +| dnnl\\_Bcdea48b | | +| dnnl\\_BcdeA48b2a | | +| dnnl\\_BcdeA48b4a | | 
+| dnnl\\_Bcdea64b | | +| dnnl\\_BcdeA64b2a | | +| dnnl\\_BcdeA64b4a | | +| dnnl\\_Bca32b | | +| dnnl\\_BcA32b2a | | +| dnnl\\_BcA32b4a | | +| dnnl\\_Bca48b | | +| dnnl\\_BcA48b2a | | +| dnnl\\_BcA48b4a | | +| dnnl\\_Bca64b | | +| dnnl\\_BcA64b2a | | +| dnnl\\_BcA64b4a | | +| dnnl\\_aCdb32c | | +| dnnl\\_aCdB32c2b | | +| dnnl\\_aCdB32c4b | | +| dnnl\\_aCdb48c | | +| dnnl\\_aCdB48c2b | | +| dnnl\\_aCdB48c4b | | +| dnnl\\_aCdb64c | | +| dnnl\\_aCdB64c2b | | +| dnnl\\_aCdB64c4b | | +| dnnl\\_BcA16a16b2a | | +| dnnl\\_BcA16a16b4a | | +| dnnl\\_BcdA16a16b2a | | +| dnnl\\_BcdA16a16b4a | | +| dnnl\\_BcdeA16a16b2a | | +| dnnl\\_BcdeA16a16b4a | | +| dnnl\\_aCdB16b16c2b | | +| dnnl\\_aCdB16b16c4b | | +| dnnl\\_aCdeB16b16c2b | | +| dnnl\\_aCdeB16b16c4b | | +| dnnl\\_aCdefB16b16c2b | | +| dnnl\\_aCdefB16b16c4b | | +| dnnl\\_BcA16a32b2a | | +| dnnl\\_BcA16a32b4a | | +| dnnl\\_BcA16a48b2a | | +| dnnl\\_BcA16a48b4a | | +| dnnl\\_BcA16a64b2a | | +| dnnl\\_BcA16a64b4a | | +| dnnl\\_aCdB16b32c2b | | +| dnnl\\_aCdB16b32c4b | | +| dnnl\\_aCdB16b48c2b | | +| dnnl\\_aCdB16b48c4b | | +| dnnl\\_aCdB16b64c2b | | +| dnnl\\_aCdB16b64c4b | | +| dnnl\\_BcdA16a32b2a | | +| dnnl\\_BcdA16a32b4a | | +| dnnl\\_BcdA16a48b2a | | +| dnnl\\_BcdA16a48b4a | | +| dnnl\\_BcdA16a64b2a | | +| dnnl\\_BcdA16a64b4a | | +| dnnl\\_aCdeB16b32c2b | | +| dnnl\\_aCdeB16b32c4b | | +| dnnl\\_aCdeB16b48c2b | | +| dnnl\\_aCdeB16b48c4b | | +| dnnl\\_aCdeB16b64c2b | | +| dnnl\\_aCdeB16b64c4b | | +| dnnl\\_Bca16b | | +| dnnl\\_BcA16b2a | | +| dnnl\\_BcA16b4a | | +| dnnl\\_Bcda16b | | +| dnnl\\_BcdA16b2a | | +| dnnl\\_BcdA16b4a | | +| dnnl\\_Bcdea16b | | +| dnnl\\_BcdeA16b2a | | +| dnnl\\_BcdeA16b4a | | +| dnnl\\_aCdb16c | | +| dnnl\\_aCdB16c2b | | +| dnnl\\_aCdB16c4b | | +| dnnl\\_aCdeb16c | | +| dnnl\\_aCdeB16c2b | | +| dnnl\\_aCdeB16c4b | | +| dnnl\\_aCdefb16c | | +| dnnl\\_aCdefB16c2b | | +| dnnl\\_aCdefB16c4b | | +| dnnl\\_Bcda32b | | +| dnnl\\_BcdA32b2a | | +| dnnl\\_BcdA32b4a | | +| dnnl\\_Bcda48b | | +| 
dnnl\\_BcdA48b2a | | +| dnnl\\_BcdA48b4a | | +| dnnl\\_Bcda64b | | +| dnnl\\_BcdA64b2a | | +| dnnl\\_BcdA64b4a | | +| dnnl\\_aCdeb32c | | +| dnnl\\_aCdeB32c2b | | +| dnnl\\_aCdeB32c4b | | +| dnnl\\_aCdeb48c | | +| dnnl\\_aCdeB48c2b | | +| dnnl\\_aCdeB48c4b | | +| dnnl\\_aCdeb64c | | +| dnnl\\_aCdeB64c2b | | +| dnnl\\_aCdeB64c4b | | +| dnnl\\_Acb24a | | +| dnnl\\_Acdb24a | | +| dnnl\\_Acdeb24a | | +| dnnl\\_aBdc24b | | +| dnnl\\_aBdec24b | | +| dnnl\\_aBdefc24b | | +| dnnl\\_abDc16d | | +| dnnl\\_abdEc16e | | +| dnnl\\_abdCe16c | | +| dnnl\\_AcB24a2b | | +| dnnl\\_AcdB24a2b | | +| dnnl\\_AcdeB24a2b | | +| dnnl\\_aBdC24b2c | | +| dnnl\\_aBdeC24b2c | | +| dnnl\\_aBdefC24b2c | | +| dnnl\\_AcB8a2b | | +| dnnl\\_AcdB8a2b | | +| dnnl\\_AcdeB8a2b | | +| dnnl\\_aBdC8b2c | | +| dnnl\\_aBdeC8b2c | | +| dnnl\\_aBdefC8b2c | | +| dnnl\\_AB8b32a | | +| dnnl\\_ABc8b32a | | +| dnnl\\_ABcd8b32a | | +| dnnl\\_ABcde8b32a | | +| dnnl\\_AB8b24a | | +| dnnl\\_ABc8b24a | | +| dnnl\\_ABcd8b24a | | +| dnnl\\_ABcde8b24a | | +| dnnl\\_AB8b16a | | +| dnnl\\_ABc8b16a | | +| dnnl\\_ABcd8b16a | | +| dnnl\\_ABcde8b16a | | +| dnnl\\_AB8b8a | | +| dnnl\\_AB4b8a4b | | +| dnnl\\_AB4b24a4b | | +| dnnl\\_ABc4b8a4b | | +| dnnl\\_ABc4b24a4b | | +| dnnl\\_ABcd4b8a4b | | +| dnnl\\_ABcd4b24a4b | | +| dnnl\\_ABcde4b8a4b | | +| dnnl\\_ABcde4b24a4b | | +| dnnl\\_AB8b24a2b | | +| dnnl\\_ABc8b24a2b | | +| dnnl\\_ABcd8b24a2b | | +| dnnl\\_ABcde8b24a2b | | +| dnnl\\_AB8b8a2b | | +| dnnl\\_ABc8b8a2b | | +| dnnl\\_ABcd8b8a2b | | +| dnnl\\_ABcde8b8a2b | | +| dnnl\\_AcB24a4b | | +| dnnl\\_AcdB24a4b | | +| dnnl\\_AcdeB24a4b | | +| dnnl\\_aBdC24b4c | | +| dnnl\\_aBdeC24b4c | | +| dnnl\\_aBdefC24b4c | | +| dnnl\\_AcB8a4b | | +| dnnl\\_AcdB8a4b | | +| dnnl\\_AcdeB8a4b | | +| dnnl\\_aBdC8b4c | | +| dnnl\\_aBdeC8b4c | | +| dnnl\\_aBdefC8b4c | | +| dnnl\\_Bca8b | | +| dnnl\\_BcA8b2a | | +| dnnl\\_Bcda8b | | +| dnnl\\_BcdA8b2a | | +| dnnl\\_Bcdea8b | | +| dnnl\\_BcdeA8b2a | | +| dnnl\\_aCdb8c | | +| dnnl\\_aCdB8c2b | | +| 
dnnl\\_aCdeb8c | | +| dnnl\\_aCdeB8c2b | | +| dnnl\\_aCdefb8c | | +| dnnl\\_aCdefB8c2b | | +| dnnl\\_Bca24b | | +| dnnl\\_BcA24b2a | | +| dnnl\\_Bcda24b | | +| dnnl\\_BcdA24b2a | | +| dnnl\\_Bcdea24b | | +| dnnl\\_BcdeA24b2a | | +| dnnl\\_aCdb24c | | +| dnnl\\_aCdB24c2b | | +| dnnl\\_aCdeb24c | | +| dnnl\\_aCdeB24c2b | | +| dnnl\\_aCdefb24c | | +| dnnl\\_aCdefB24c2b | | +| dnnl\\_BcA8b4a | | +| dnnl\\_BcdA8b4a | | +| dnnl\\_BcdeA8b4a | | +| dnnl\\_aCdB8c4b | | +| dnnl\\_aCdeB8c4b | | +| dnnl\\_aCdefB8c4b | | +| dnnl\\_BcA24b4a | | +| dnnl\\_BcdA24b4a | | +| dnnl\\_BcdeA24b4a | | +| dnnl\\_aCdB24c4b | | +| dnnl\\_aCdeB24c4b | | +| dnnl\\_aCdefB24c4b | | +| dnnl\\_AB16b48a | | +| dnnl\\_ABc16b48a | | +| dnnl\\_ABcd16b48a | | +| dnnl\\_ABcde16b48a | | +| dnnl\\_ABc16a4b | | +| dnnl\\_ABcd16a4b | | +| dnnl\\_ABcde16a4b | | +| dnnl\\_defcbA16a | | +| dnnl\\_defcbA8a | | +| dnnl\\_AcB16b64a | | +| dnnl\\_AcdB16b64a | | +| dnnl\\_AcdeB16b64a | | +| dnnl\\_AcB16b48a | | +| dnnl\\_AcdB16b48a | | +| dnnl\\_AcdeB16b48a | | +| dnnl\\_AcB16b32a | | +| dnnl\\_AcdB16b32a | | +| dnnl\\_AcdeB16b32a | | +| dnnl\\_AcB16b16a | | +| dnnl\\_AcdB16b16a | | +| dnnl\\_AcdeB16b16a | | +| dnnl\\_AcB8b32a | | +| dnnl\\_AcdB8b32a | | +| dnnl\\_AcdeB8b32a | | +| dnnl\\_AcB8b24a | | +| dnnl\\_AcdB8b24a | | +| dnnl\\_AcdeB8b24a | | +| dnnl\\_AcB8b16a | | +| dnnl\\_AcdB8b16a | | +| dnnl\\_AcdeB8b16a | | +| dnnl\\_AcB8b8a | | +| dnnl\\_AcdB8b8a | | +| dnnl\\_AcdeB8b8a | | +| dnnl\\_AcB8b64a2b | | +| dnnl\\_AcdB8b64a2b | | +| dnnl\\_AcdeB8b64a2b | | +| dnnl\\_AcB8b32a2b | | +| dnnl\\_AcdB8b32a2b | | +| dnnl\\_AcdeB8b32a2b | | +| dnnl\\_AcB8b24a2b | | +| dnnl\\_AcdB8b24a2b | | +| dnnl\\_AcdeB8b24a2b | | +| dnnl\\_AcB8b16a2b | | +| dnnl\\_AcdB8b16a2b | | +| dnnl\\_AcdeB8b16a2b | | +| dnnl\\_AcB8b8a2b | | +| dnnl\\_AcdB8b8a2b | | +| dnnl\\_AcdeB8b8a2b | | +| dnnl\\_AcB4b64a4b | | +| dnnl\\_AcdB4b64a4b | | +| dnnl\\_AcdeB4b64a4b | | +| dnnl\\_AcB4b32a4b | | +| dnnl\\_AcdB4b32a4b | | +| 
dnnl\\_AcdeB4b32a4b | | +| dnnl\\_AcB4b24a4b | | +| dnnl\\_AcdB4b24a4b | | +| dnnl\\_AcdeB4b24a4b | | +| dnnl\\_AcB4b16a4b | | +| dnnl\\_AcdB4b16a4b | | +| dnnl\\_AcdeB4b16a4b | | +| dnnl\\_AcB4b8a4b | | +| dnnl\\_AcdB4b8a4b | | +| dnnl\\_AcdeB4b8a4b | | +| dnnl\\_Ab4a | | +| dnnl\\_Ab8a | | +| dnnl\\_BA4b4a | | +| dnnl\\_BA8b4a | | +| dnnl\\_BA2a24b | | +| dnnl\\_aCB2b24c | | +| dnnl\\_BA2a8b | | +| dnnl\\_aCB2b8c | | +| dnnl\\_BA8a24b | | +| dnnl\\_aCB8b24c | | +| dnnl\\_BA8a16b | | +| dnnl\\_aCB8b16c | | +| dnnl\\_BA8a8b | | +| dnnl\\_aCB8b8c | | +| dnnl\\_bcad | | +| dnnl\\_cabd | | +| dnnl\\_dabc | | +| dnnl\\_format\\_tag\\_last | Just a sentinel, not real memory format tag. Must be changed after new format tag is added. | +| dnnl\\_x | 1D tensor, an alias to #dnnl\\_a | +| dnnl\\_nc | 2D CNN activations tensor, an alias to #dnnl\\_ab | +| dnnl\\_cn | 2D CNN activations tensor, an alias to #dnnl\\_ba | +| dnnl\\_tn | 2D RNN statistics tensor, an alias to #dnnl\\_ab | +| dnnl\\_nt | 2D RNN statistics tensor, an alias to #dnnl\\_ba | +| dnnl\\_ncw | 3D CNN activations tensor, an alias to #dnnl\\_abc | +| dnnl\\_nwc | 3D CNN activations tensor, an alias to #dnnl\\_acb | +| dnnl\\_nchw | 4D CNN activations tensor, an alias to #dnnl\\_abcd | +| dnnl\\_nhwc | 4D CNN activations tensor, an alias to #dnnl\\_acdb | +| dnnl\\_chwn | 4D CNN activations tensor, an alias to #dnnl\\_bcda | +| dnnl\\_ncdhw | 5D CNN activations tensor, an alias to #dnnl\\_abcde | +| dnnl\\_ndhwc | 5D CNN activations tensor, an alias to #dnnl\\_acdeb | +| dnnl\\_oi | 2D CNN weights tensor, an alias to #dnnl\\_ab | +| dnnl\\_io | 2D CNN weights tensor, an alias to #dnnl\\_ba | +| dnnl\\_oiw | 3D CNN weights tensor, an alias to #dnnl\\_abc | +| dnnl\\_owi | 3D CNN weights tensor, an alias to #dnnl\\_acb | +| dnnl\\_wio | 3D CNN weights tensor, an alias to #dnnl\\_cba | +| dnnl\\_woi | 3D CNN weights tensor, an alias to #dnnl\\_cab | +| dnnl\\_iwo | 3D CNN weights tensor, an alias to #dnnl\\_bca 
| +| dnnl\\_oihw | 4D CNN weights tensor, an alias to #dnnl\\_abcd | +| dnnl\\_hwio | 4D CNN weights tensor, an alias to #dnnl\\_cdba | +| dnnl\\_hwoi | 4D CNN weights tensor, an alias to #dnnl\\_cdab | +| dnnl\\_ohwi | 4D CNN weights tensor, an alias to #dnnl\\_acdb | +| dnnl\\_ihwo | 4D CNN weights tensor, an alias to #dnnl\\_bcda | +| dnnl\\_iohw | 4D CNN weights tensor, an alias to #dnnl\\_bacd | +| dnnl\\_oidhw | 5D CNN weights tensor, an alias to #dnnl\\_abcde | +| dnnl\\_iodhw | 5D CNN weights tensor, an alias to #dnnl\\_bacde | +| dnnl\\_dhwio | 5D CNN weights tensor, an alias to #dnnl\\_cdeba | +| dnnl\\_dhwoi | 5D CNN weights tensor, an alias to #dnnl\\_cdeab | +| dnnl\\_odhwi | 5D CNN weights tensor, an alias to #dnnl\\_acdeb | +| dnnl\\_idhwo | 5D CNN weights tensor, an alias to #dnnl\\_bcdea | +| dnnl\\_goiw | 4D CNN weights tensor (incl. groups), an alias to #dnnl\\_abcd | +| dnnl\\_gowi | 4D CNN weights tensor (incl. groups), an alias to #dnnl\\_abdc | +| dnnl\\_wigo | 4D CNN weights tensor (incl. groups), an alias to #dnnl\\_dcab | +| dnnl\\_goihw | 5D CNN weights tensor (incl. groups), an alias to #dnnl\\_abcde | +| dnnl\\_gohwi | 5D CNN weights tensor (incl. groups), an alias to #dnnl\\_abdec | +| dnnl\\_hwigo | 5D CNN weights tensor (incl. groups), an alias to #dnnl\\_decab | +| dnnl\\_giohw | 5D CNN weights tensor (incl. groups), an alias to #dnnl\\_acbde | +| dnnl\\_goidhw | 6D CNN weights tensor (incl. groups), an alias to #dnnl\\_abcdef | +| dnnl\\_godhwi | 6D CNN weights tensor (incl. groups), an alias to #dnnl\\_abdefc | +| dnnl\\_giodhw | 6D CNN weights tensor (incl. groups), an alias to #dnnl\\_acbdef | +| dnnl\\_dhwigo | 6D CNN weights tensor (incl. groups), an alias to #dnnl\\_defcab | +| dnnl\\_tnc | 3D RNN data tensor in the format (seq\\_length, batch, input channels), an alias to #dnnl\\_abc. | +| dnnl\\_ntc | 3D RNN data tensor in the format (batch, seq\\_length, input channels), an alias to #dnnl\\_bac. 
| +| dnnl\\_ldnc | 4D RNN states tensor in the format (num\\_layers, num\\_directions, batch, state channels), an alias to #dnnl\\_abcd. | +| dnnl\\_ldigo | 5D RNN weights tensor in the format (num\\_layers, num\\_directions, input\\_channels, num\\_gates, output\\_channels), an alias to #dnnl\\_abcde. - For LSTM cells, the gates order is input, forget, candidate and output gate. - For GRU cells, the gates order is update, reset and output gate. | +| dnnl\\_ldgoi | 5D RNN weights tensor in the format (num\\_layers, num\\_directions, num\\_gates, output\\_channels, input\\_channels), an alias to #dnnl\\_abdec. - For LSTM cells, the gates order is input, forget, candidate and output gate. - For GRU cells, the gates order is update, reset and output gate. | +| dnnl\\_ldio | 4D LSTM projection tensor in the format (num\\_layers, num\\_directions, num\\_channels\\_in\\_hidden\\_state, num\\_channels\\_in\\_recurrent\\_projection), an alias to #dnnl\\_abcd. | +| dnnl\\_ldoi | 4D LSTM projection tensor in the format (num\\_layers, num\\_directions, num\\_channels\\_in\\_recurrent\\_projection, num\\_channels\\_in\\_hidden\\_state), an alias to #dnnl\\_abdc. | +| dnnl\\_ldgo | 4D RNN bias tensor in the format (num\\_layers, num\\_directions, num\\_gates, output\\_channels), an alias to #dnnl\\_abcd. - For LSTM cells, the gates order is input, forget, candidate and output gate. - For GRU cells, the gates order is update, reset and output gate. 
| +| dnnl\\_ldOi16o | 5D LSTM projection tensor | +| dnnl\\_ldOi32o | | +| dnnl\\_ldOI32o4i | | +| dnnl\\_ldIo32i | | +| dnnl\\_ldgOi16o | 6D RNN weights tensor | +| dnnl\\_ldgOi32o | | +| dnnl\\_ldgOI32o2i | | +| dnnl\\_ldgOI32o4i | | +| dnnl\\_ldgOI64o2i | | +| dnnl\\_ldgOI64o4i | | +| dnnl\\_ldgIo16i | | +| dnnl\\_ldgIo32i | | +| dnnl\\_ldgIO32i2o | | +| dnnl\\_nCdhw32c | 5D CNN activations tensor blocked by channels with block size 32, an alias to #dnnl\\_aBcde32b | +| dnnl\\_nCdhw16c | 5D CNN activations tensor blocked by channels with block size 16, an alias to #dnnl\\_aBcde16b | +| dnnl\\_nCdhw4c | 5D CNN activations tensor blocked by channels with block size 4, an alias to #dnnl\\_aBcde4b | +| dnnl\\_nCdhw8c | 5D CNN activations tensor blocked by channels with block size 8, an alias to #dnnl\\_aBcde8b | +| dnnl\\_nChw32c | 4D CNN activations tensor blocked by channels with block size 32, an alias to #dnnl\\_aBcd32b | +| dnnl\\_nChw16c | 4D CNN activations tensor blocked by channels with block size 16, an alias to #dnnl\\_aBcd16b | +| dnnl\\_nChw4c | 4D CNN activations tensor blocked by channels with block size 4, an alias to #dnnl\\_aBcd4b | +| dnnl\\_nChw8c | 4D CNN activations tensor blocked by channels with block size 8, an alias to #dnnl\\_aBcd8b | +| dnnl\\_nCw32c | 3D CNN activations tensor blocked by channels with block size 32, an alias to #dnnl\\_aBc32b | +| dnnl\\_nCw16c | 3D CNN activations tensor blocked by channels with block size 16, an alias to #dnnl\\_aBc16b | +| dnnl\\_nCw4c | 3D CNN activations tensor blocked by channels with block size 4, an alias to #dnnl\\_aBc4b | +| dnnl\\_nCw8c | 3D CNN activations tensor blocked by channels with block size 8, an alias to #dnnl\\_aBc8b | +| dnnl\\_NCw16n16c | | +| dnnl\\_NCdhw16n16c | | +| dnnl\\_NChw16n16c | | +| dnnl\\_NCw32n16c | | +| dnnl\\_NChw32n16c | | +| dnnl\\_NChw16n32c | | +| dnnl\\_NCdhw32n16c | | +| dnnl\\_NCw32n32c | | +| dnnl\\_NChw32n32c | | +| dnnl\\_NCdhw32n32c | | +| dnnl\\_OI16i16o 
| | +| dnnl\\_OI16i32o | | +| dnnl\\_OI16i48o | | +| dnnl\\_OI16i64o | | +| dnnl\\_OI8i8o2i | | +| dnnl\\_OI8i16o2i | | +| dnnl\\_OI8i24o2i | | +| dnnl\\_OI8i32o2i | | +| dnnl\\_OI8i64o2i | | +| dnnl\\_OI4i8o4i | | +| dnnl\\_OI4i16o4i | | +| dnnl\\_OI4i24o4i | | +| dnnl\\_OI4i32o4i | | +| dnnl\\_OI4i64o4i | | +| dnnl\\_OI16i16o4i | | +| dnnl\\_OI8i32o | | +| dnnl\\_OI8i24o | | +| dnnl\\_OI8i16o | | +| dnnl\\_OI8i8o | | +| dnnl\\_IOw16o16i | | +| dnnl\\_IOw16i16o | | +| dnnl\\_OIw16i16o | | +| dnnl\\_OwI16i16o | | +| dnnl\\_OIw16i32o | | +| dnnl\\_OwI16i32o | | +| dnnl\\_OIw16i48o | | +| dnnl\\_OwI16i48o | | +| dnnl\\_OIw16i64o | | +| dnnl\\_OwI16i64o | | +| dnnl\\_OIw16o16i | | +| dnnl\\_Oiw16o | | +| dnnl\\_OIw4i8o4i | | +| dnnl\\_OwI4i8o4i | | +| dnnl\\_OIw4i16o4i | | +| dnnl\\_OwI4i16o4i | | +| dnnl\\_OIw4i24o4i | | +| dnnl\\_OwI4i24o4i | | +| dnnl\\_OIw4i32o4i | | +| dnnl\\_OwI4i32o4i | | +| dnnl\\_OIw4i64o4i | | +| dnnl\\_OwI4i64o4i | | +| dnnl\\_OIw2i8o4i | | +| dnnl\\_OIw16i16o4i | | +| dnnl\\_OIw16i16o2i | | +| dnnl\\_OIw16o16i2o | | +| dnnl\\_OIw4i4o | | +| dnnl\\_OIw4o4i | | +| dnnl\\_Oiw4o | | +| dnnl\\_OIw8i8o2i | | +| dnnl\\_OwI8i8o2i | | +| dnnl\\_OIw8i16o2i | | +| dnnl\\_OwI8i16o2i | | +| dnnl\\_OIw8i24o2i | | +| dnnl\\_OwI8i24o2i | | +| dnnl\\_OIw8i32o2i | | +| dnnl\\_OwI8i32o2i | | +| dnnl\\_OIw8i64o2i | | +| dnnl\\_OwI8i64o2i | | +| dnnl\\_OIw8i8o | | +| dnnl\\_OwI8i8o | | +| dnnl\\_OIw8o16i2o | | +| dnnl\\_IOw8o16i2o | | +| dnnl\\_OIw8o8i | | +| dnnl\\_OIw8o4i | | +| dnnl\\_Owi16o | | +| dnnl\\_OwI16o2i | | +| dnnl\\_OwI16o4i | | +| dnnl\\_Iwo8i | | +| dnnl\\_IwO8i2o | | +| dnnl\\_IwO8i4o | | +| dnnl\\_Iwo16i | | +| dnnl\\_IwO16i2o | | +| dnnl\\_IwO16i4o | | +| dnnl\\_Iwo24i | | +| dnnl\\_IwO24i2o | | +| dnnl\\_IwO24i4o | | +| dnnl\\_Owi4o | | +| dnnl\\_Owi8o | | +| dnnl\\_OwI8o2i | | +| dnnl\\_OIw8i32o | | +| dnnl\\_OwI8i32o | | +| dnnl\\_OIw8i24o | | +| dnnl\\_OwI8i24o | | +| dnnl\\_OIw8i16o | | +| dnnl\\_OwI8i16o | | +| dnnl\\_OwI8o4i | | +| 
dnnl\\_IOhw16i16o | | +| dnnl\\_IOhw16o16i | | +| dnnl\\_Ohwi16o | | +| dnnl\\_OhwI16o2i | | +| dnnl\\_OhwI16o4i | | +| dnnl\\_Ihwo8i | | +| dnnl\\_IhwO8i2o | | +| dnnl\\_IhwO8i4o | | +| dnnl\\_Ihwo16i | | +| dnnl\\_IhwO16i2o | | +| dnnl\\_IhwO16i4o | | +| dnnl\\_Ihwo24i | | +| dnnl\\_IhwO24i2o | | +| dnnl\\_IhwO24i4o | | +| dnnl\\_Ohwi24o | | +| dnnl\\_Ohwi32o | | +| dnnl\\_Ohwi4o | | +| dnnl\\_Ohwi8o | | +| dnnl\\_OhwI8o2i | | +| dnnl\\_OhwI8o4i | | +| dnnl\\_OIhw16i16o | | +| dnnl\\_OhwI16i16o | | +| dnnl\\_OIhw16i32o | | +| dnnl\\_OhwI16i32o | | +| dnnl\\_OIhw16i48o | | +| dnnl\\_OhwI16i48o | | +| dnnl\\_OIhw16i64o | | +| dnnl\\_OhwI16i64o | | +| dnnl\\_OIhw16o16i | | +| dnnl\\_Oihw16o | | +| dnnl\\_OIhw4i8o4i | | +| dnnl\\_OhwI4i8o4i | | +| dnnl\\_OIhw4i16o4i | | +| dnnl\\_OhwI4i16o4i | | +| dnnl\\_OIhw4i24o4i | | +| dnnl\\_OhwI4i24o4i | | +| dnnl\\_OIhw4i32o4i | | +| dnnl\\_OhwI4i32o4i | | +| dnnl\\_OIhw4i64o4i | | +| dnnl\\_OhwI4i64o4i | | +| dnnl\\_OIhw16i16o4i | | +| dnnl\\_OIhw16i16o2i | | +| dnnl\\_OIhw16o16i2o | | +| dnnl\\_OIhw4i4o | | +| dnnl\\_OIhw4o4i | | +| dnnl\\_Oihw4o | | +| dnnl\\_OIhw8i8o2i | | +| dnnl\\_OhwI8i8o2i | | +| dnnl\\_OIhw8i16o2i | | +| dnnl\\_OhwI8i16o2i | | +| dnnl\\_OIhw8i32o2i | | +| dnnl\\_OhwI8i32o2i | | +| dnnl\\_OIhw8i24o2i | | +| dnnl\\_OhwI8i24o2i | | +| dnnl\\_OIhw8i64o2i | | +| dnnl\\_OhwI8i64o2i | | +| dnnl\\_OIhw8i8o | | +| dnnl\\_OhwI8i8o | | +| dnnl\\_OIhw8o16i2o | | +| dnnl\\_OIhw2i8o4i | | +| dnnl\\_IOhw8o16i2o | | +| dnnl\\_OIhw8o8i | | +| dnnl\\_OIhw8o4i | | +| dnnl\\_Owhi16o | | +| dnnl\\_OIhw8i32o | | +| dnnl\\_OhwI8i32o | | +| dnnl\\_OIhw8i24o | | +| dnnl\\_OhwI8i24o | | +| dnnl\\_OIhw8i16o | | +| dnnl\\_OhwI8i16o | | +| dnnl\\_Odhwi16o | | +| dnnl\\_OdhwI16o2i | | +| dnnl\\_OdhwI16o4i | | +| dnnl\\_Idhwo8i | | +| dnnl\\_IdhwO8i2o | | +| dnnl\\_IdhwO8i4o | | +| dnnl\\_Idhwo16i | | +| dnnl\\_IdhwO16i2o | | +| dnnl\\_IdhwO16i4o | | +| dnnl\\_Idhwo24i | | +| dnnl\\_IdhwO24i2o | | +| dnnl\\_IdhwO24i4o | | +| 
dnnl\\_Odhwi4o | | +| dnnl\\_Odhwi8o | | +| dnnl\\_OdhwI8o2i | | +| dnnl\\_OdhwI8o4i | | +| dnnl\\_Odwhi16o | | +| dnnl\\_OIdhw16i16o | | +| dnnl\\_OdhwI16i16o | | +| dnnl\\_OIdhw16i32o | | +| dnnl\\_OdhwI16i32o | | +| dnnl\\_OIdhw16i48o | | +| dnnl\\_OdhwI16i48o | | +| dnnl\\_OIdhw16i64o | | +| dnnl\\_OdhwI16i64o | | +| dnnl\\_OIdhw16o16i | | +| dnnl\\_Oidhw16o | | +| dnnl\\_OIdhw4i4o | | +| dnnl\\_OIdhw4o4i | | +| dnnl\\_Oidhw4o | | +| dnnl\\_OIdhw8i8o2i | | +| dnnl\\_OdhwI8i8o2i | | +| dnnl\\_OIdhw8i16o2i | | +| dnnl\\_OdhwI8i16o2i | | +| dnnl\\_OIdhw8i32o2i | | +| dnnl\\_OdhwI8i32o2i | | +| dnnl\\_OIdhw8i24o2i | | +| dnnl\\_OdhwI8i24o2i | | +| dnnl\\_OIdhw8i64o2i | | +| dnnl\\_OdhwI8i64o2i | | +| dnnl\\_OIdhw8i8o | | +| dnnl\\_OdhwI8i8o | | +| dnnl\\_OIdhw8o16i2o | | +| dnnl\\_IOdhw8o16i2o | | +| dnnl\\_OIdhw4i8o4i | | +| dnnl\\_OdhwI4i8o4i | | +| dnnl\\_OIdhw4i16o4i | | +| dnnl\\_OdhwI4i16o4i | | +| dnnl\\_OIdhw4i24o4i | | +| dnnl\\_OdhwI4i24o4i | | +| dnnl\\_OIdhw4i32o4i | | +| dnnl\\_OdhwI4i32o4i | | +| dnnl\\_OIdhw4i64o4i | | +| dnnl\\_OdhwI4i64o4i | | +| dnnl\\_OIdhw16i16o4i | | +| dnnl\\_OIdhw16i16o2i | | +| dnnl\\_OIdhw2i8o4i | | +| dnnl\\_OIdhw8o8i | | +| dnnl\\_OIdhw8o4i | | +| dnnl\\_IOdhw16i16o | | +| dnnl\\_OIdhw4o8i8o4i | | +| dnnl\\_IOdhw16o16i | | +| dnnl\\_OIdhw16o16i2o | | +| dnnl\\_OIdhw8i32o | | +| dnnl\\_OdhwI8i32o | | +| dnnl\\_OIdhw8i24o | | +| dnnl\\_OdhwI8i24o | | +| dnnl\\_OIdhw8i16o | | +| dnnl\\_OdhwI8i16o | | +| dnnl\\_Goiw16g | | +| dnnl\\_Goiw8g | | +| dnnl\\_Goiw4g | | +| dnnl\\_gIOw16o16i | | +| dnnl\\_gIOw16i16o | | +| dnnl\\_gOIw16i16o | | +| dnnl\\_gOIw16o16i | | +| dnnl\\_gOiw16o | | +| dnnl\\_gOIw4i16o4i | | +| dnnl\\_gOIw2i8o4i | | +| dnnl\\_gOIw16i16o4i | | +| dnnl\\_gOIw16i16o2i | | +| dnnl\\_gOIw16o16i2o | | +| dnnl\\_gOIw4i4o | | +| dnnl\\_gOIw4o4i | | +| dnnl\\_gOiw4o | | +| dnnl\\_gOIw8i16o2i | | +| dnnl\\_gOIw8i8o | | +| dnnl\\_gOIw8o16i2o | | +| dnnl\\_gIOw8o16i2o | | +| dnnl\\_gOIw8o8i | | +| dnnl\\_gOIw8o4i | | +| 
dnnl\\_gOwi16o | | +| dnnl\\_gOwI16o2i | | +| dnnl\\_gOwI16o4i | | +| dnnl\\_gIwo8i | | +| dnnl\\_gIwO8i2o | | +| dnnl\\_gIwO8i4o | | +| dnnl\\_gIwo16i | | +| dnnl\\_gIwO16i2o | | +| dnnl\\_gIwO16i4o | | +| dnnl\\_gIwo24i | | +| dnnl\\_gIwO24i2o | | +| dnnl\\_gIwO24i4o | | +| dnnl\\_gOwi4o | | +| dnnl\\_gOwi8o | | +| dnnl\\_gOwI8o2i | | +| dnnl\\_gOwI8o4i | | +| dnnl\\_Goiw32g | | +| dnnl\\_gOIw2i4o2i | | +| dnnl\\_gOIw2o4i2o | | +| dnnl\\_gOIw4i8o2i | | +| dnnl\\_gOIw4o8i2o | | +| dnnl\\_goIw4i | | +| dnnl\\_goIw32i | | +| dnnl\\_gIOhw16i16o | | +| dnnl\\_gIOhw16o16i | | +| dnnl\\_gOhwi16o | | +| dnnl\\_gOhwI16o2i | | +| dnnl\\_gOhwI16o4i | | +| dnnl\\_gIhwo8i | | +| dnnl\\_gIhwO8i2o | | +| dnnl\\_gIhwO8i4o | | +| dnnl\\_gIhwo16i | | +| dnnl\\_gIhwO16i2o | | +| dnnl\\_gIhwO16i4o | | +| dnnl\\_gIhwo24i | | +| dnnl\\_gIhwO24i2o | | +| dnnl\\_gIhwO24i4o | | +| dnnl\\_gOhwi32o | | +| dnnl\\_gOhwi24o | | +| dnnl\\_gOhwI24o2i | | +| dnnl\\_gOhwI24o4i | | +| dnnl\\_gOhwi4o | | +| dnnl\\_gOhwi8o | | +| dnnl\\_gOhwI8o2i | | +| dnnl\\_gOhwI8o4i | | +| dnnl\\_Goihw16g | | +| dnnl\\_gOIhw16i16o | | +| dnnl\\_gOIhw16o16i | | +| dnnl\\_gOihw16o | | +| dnnl\\_gOIhw2i8o4i | | +| dnnl\\_gOIhw4i16o4i | | +| dnnl\\_gOIhw16i16o4i | | +| dnnl\\_gOIhw16i16o2i | | +| dnnl\\_gOIhw16o16i2o | | +| dnnl\\_gOIhw4i4o | | +| dnnl\\_gOIhw4o4i | | +| dnnl\\_gOihw4o | | +| dnnl\\_Goihw8g | | +| dnnl\\_Goihw4g | | +| dnnl\\_gOIhw8i16o2i | | +| dnnl\\_gOIhw8i8o | | +| dnnl\\_gOIhw8o16i2o | | +| dnnl\\_gIOhw8o16i2o | | +| dnnl\\_gOIhw8o8i | | +| dnnl\\_gOIhw8o4i | | +| dnnl\\_Goihw32g | | +| dnnl\\_gOwhi16o | | +| dnnl\\_goIhw4i | | +| dnnl\\_goIhw32i | | +| dnnl\\_OIw4o8i8o4i | | +| dnnl\\_OIhw4o8i8o4i | | +| dnnl\\_IOw4i8o8i4o | | +| dnnl\\_IOhw4i8o8i4o | | +| dnnl\\_IOdhw4i8o8i4o | | +| dnnl\\_OIhw2o8i8o2i | | +| dnnl\\_gOIw4o8i8o4i | | +| dnnl\\_gOIhw4o8i8o4i | | +| dnnl\\_gOIdhw4o8i8o4i | | +| dnnl\\_gIOw4i8o8i4o | | +| dnnl\\_gIOhw4i8o8i4o | | +| dnnl\\_gIOdhw4i8o8i4o | | +| 
dnnl\\_gOIhw2o8i8o2i | | +| dnnl\\_gOIhw2i4o2i | | +| dnnl\\_gOIhw2o4i2o | | +| dnnl\\_gOIhw4i8o2i | | +| dnnl\\_gOIhw4o8i2o | | +| dnnl\\_gIOdhw16i16o | | +| dnnl\\_gIOdhw16o16i | | +| dnnl\\_gOdhwi16o | | +| dnnl\\_gOdhwI16o2i | | +| dnnl\\_gOdhwI16o4i | | +| dnnl\\_gIdhwo8i | | +| dnnl\\_gIdhwO8i2o | | +| dnnl\\_gIdhwO8i4o | | +| dnnl\\_gIdhwo16i | | +| dnnl\\_gIdhwO16i2o | | +| dnnl\\_gIdhwO16i4o | | +| dnnl\\_gIdhwo24i | | +| dnnl\\_gIdhwO24i2o | | +| dnnl\\_gIdhwO24i4o | | +| dnnl\\_gOdhwi4o | | +| dnnl\\_gOdhwi8o | | +| dnnl\\_gOdhwI8o2i | | +| dnnl\\_gOdhwI8o4i | | +| dnnl\\_gOdwhi16o | | +| dnnl\\_gOIdhw16i16o | | +| dnnl\\_gOIdhw4i16o4i | | +| dnnl\\_gOIdhw16i16o4i | | +| dnnl\\_gOIdhw2i8o4i | | +| dnnl\\_gOIdhw16i16o2i | | +| dnnl\\_gOIdhw16o16i | | +| dnnl\\_gOIdhw16o16i2o | | +| dnnl\\_gOidhw16o | | +| dnnl\\_gOIdhw4i4o | | +| dnnl\\_gOIdhw4o4i | | +| dnnl\\_gOidhw4o | | +| dnnl\\_gOIdhw8i16o2i | | +| dnnl\\_gOIdhw8i8o | | +| dnnl\\_gOIdhw8o16i2o | | +| dnnl\\_gIOdhw8o16i2o | | +| dnnl\\_gOIdhw8o8i | | +| dnnl\\_gOIdhw8o4i | | +| dnnl\\_Goidhw16g | | +| dnnl\\_Goidhw32g | | +| dnnl\\_gOIdhw2i4o2i | | +| dnnl\\_gOIdhw4i8o2i | | +| dnnl\\_gOIdhw2o4i2o | | +| dnnl\\_gOIdhw4o8i2o | | +| dnnl\\_goIdhw4i | | +| dnnl\\_goIdhw32i | | +| dnnl\\_Owi24o | | +| dnnl\\_OwI24o2i | | +| dnnl\\_OwI24o4i | | +| dnnl\\_Owi32o | | +| dnnl\\_OwI32o2i | | +| dnnl\\_OwI32o4i | | +| dnnl\\_Owi48o | | +| dnnl\\_OwI48o2i | | +| dnnl\\_OwI48o4i | | +| dnnl\\_Owi64o | | +| dnnl\\_OwI64o2i | | +| dnnl\\_OwI64o4i | | +| dnnl\\_Iwo32i | | +| dnnl\\_IwO32i2o | | +| dnnl\\_IwO32i4o | | +| dnnl\\_Iwo48i | | +| dnnl\\_IwO48i2o | | +| dnnl\\_IwO48i4o | | +| dnnl\\_Iwo64i | | +| dnnl\\_IwO64i2o | | +| dnnl\\_IwO64i4o | | +| dnnl\\_wIo2i | | +| dnnl\\_wIo4i | | +| dnnl\\_gOwi24o | | +| dnnl\\_gOwI24o2i | | +| dnnl\\_gOwI24o4i | | +| dnnl\\_gOwi32o | | +| dnnl\\_gOwI32o2i | | +| dnnl\\_gOwI32o4i | | +| dnnl\\_gOwi48o | | +| dnnl\\_gOwI48o2i | | +| dnnl\\_gOwI48o4i | | +| dnnl\\_gOwi64o | | 
+| dnnl\\_gOwI64o2i | | +| dnnl\\_gOwI64o4i | | +| dnnl\\_gIwo32i | | +| dnnl\\_gIwO32i2o | | +| dnnl\\_gIwO32i4o | | +| dnnl\\_gIwo48i | | +| dnnl\\_gIwO48i2o | | +| dnnl\\_gIwO48i4o | | +| dnnl\\_gIwo64i | | +| dnnl\\_gIwO64i2o | | +| dnnl\\_gIwO64i4o | | +| dnnl\\_gwio | | +| dnnl\\_gwIo2i | | +| dnnl\\_gwIo4i | | +| dnnl\\_OhwI24o | | +| dnnl\\_OhwI24o2i | | +| dnnl\\_OhwI24o4i | | +| dnnl\\_OhwI32o | | +| dnnl\\_OhwI32o2i | | +| dnnl\\_OhwI32o4i | | +| dnnl\\_Ohwi48o | | +| dnnl\\_OhwI48o2i | | +| dnnl\\_OhwI48o4i | | +| dnnl\\_Ohwi64o | | +| dnnl\\_OhwI64o2i | | +| dnnl\\_OhwI64o4i | | +| dnnl\\_Ihwo32i | | +| dnnl\\_IhwO32i2o | | +| dnnl\\_IhwO32i4o | | +| dnnl\\_Ihwo48i | | +| dnnl\\_IhwO48i2o | | +| dnnl\\_IhwO48i4o | | +| dnnl\\_Ihwo64i | | +| dnnl\\_IhwO64i2o | | +| dnnl\\_IhwO64i4o | | +| dnnl\\_hwIo2i | | +| dnnl\\_hwIo4i | | +| dnnl\\_gOhwI24o | | +| dnnl\\_gOhwI32o | | +| dnnl\\_gOhwI32o2i | | +| dnnl\\_gOhwI32o4i | | +| dnnl\\_gOhwi48o | | +| dnnl\\_gOhwI48o2i | | +| dnnl\\_gOhwI48o4i | | +| dnnl\\_gOhwi64o | | +| dnnl\\_gOhwI64o2i | | +| dnnl\\_gOhwI64o4i | | +| dnnl\\_gIhwo32i | | +| dnnl\\_gIhwO32i2o | | +| dnnl\\_gIhwO32i4o | | +| dnnl\\_gIhwo48i | | +| dnnl\\_gIhwO48i2o | | +| dnnl\\_gIhwO48i4o | | +| dnnl\\_gIhwo64i | | +| dnnl\\_gIhwO64i2o | | +| dnnl\\_gIhwO64i4o | | +| dnnl\\_ghwio | | +| dnnl\\_ghwIo2i | | +| dnnl\\_ghwIo4i | | +| dnnl\\_Odhwi24o | | +| dnnl\\_OdhwI24o2i | | +| dnnl\\_OdhwI24o4i | | +| dnnl\\_Odhwi32o | | +| dnnl\\_OdhwI32o2i | | +| dnnl\\_OdhwI32o4i | | +| dnnl\\_Odhwi48o | | +| dnnl\\_OdhwI48o2i | | +| dnnl\\_OdhwI48o4i | | +| dnnl\\_Odhwi64o | | +| dnnl\\_OdhwI64o2i | | +| dnnl\\_OdhwI64o4i | | +| dnnl\\_Idhwo32i | | +| dnnl\\_IdhwO32i2o | | +| dnnl\\_IdhwO32i4o | | +| dnnl\\_Idhwo48i | | +| dnnl\\_IdhwO48i2o | | +| dnnl\\_IdhwO48i4o | | +| dnnl\\_Idhwo64i | | +| dnnl\\_IdhwO64i2o | | +| dnnl\\_IdhwO64i4o | | +| dnnl\\_dhwIo2i | | +| dnnl\\_dhwIo4i | | +| dnnl\\_gOdhwi24o | | +| dnnl\\_gOdhwI24o2i | | +| 
dnnl\\_gOdhwI24o4i | | +| dnnl\\_gOdhwi32o | | +| dnnl\\_gOdhwI32o2i | | +| dnnl\\_gOdhwI32o4i | | +| dnnl\\_gOdhwi48o | | +| dnnl\\_gOdhwI48o2i | | +| dnnl\\_gOdhwI48o4i | | +| dnnl\\_gOdhwi64o | | +| dnnl\\_gOdhwI64o2i | | +| dnnl\\_gOdhwI64o4i | | +| dnnl\\_gIdhwo32i | | +| dnnl\\_gIdhwO32i2o | | +| dnnl\\_gIdhwO32i4o | | +| dnnl\\_gIdhwo48i | | +| dnnl\\_gIdhwO48i2o | | +| dnnl\\_gIdhwO48i4o | | +| dnnl\\_gIdhwo64i | | +| dnnl\\_gIdhwO64i2o | | +| dnnl\\_gIdhwO64i4o | | +| dnnl\\_gdhwio | | +| dnnl\\_gdhwIo2i | | +| dnnl\\_gdhwIo4i | | +| dnnl\\_OI16i32o4i | | +| dnnl\\_OI16i48o4i | | +| dnnl\\_OI16i64o4i | | +| dnnl\\_OI16i16o2i | | +| dnnl\\_OI16i32o2i | | +| dnnl\\_OI16i48o2i | | +| dnnl\\_OI16i64o2i | | +| dnnl\\_OIw16i32o4i | | +| dnnl\\_OIw16i48o4i | | +| dnnl\\_OIw16i64o4i | | +| dnnl\\_OIw16i32o2i | | +| dnnl\\_OIw16i48o2i | | +| dnnl\\_OIw16i64o2i | | +| dnnl\\_OIhw16i32o4i | | +| dnnl\\_OIhw16i48o4i | | +| dnnl\\_OIhw16i64o4i | | +| dnnl\\_OIhw16i32o2i | | +| dnnl\\_OIhw16i48o2i | | +| dnnl\\_OIhw16i64o2i | | +| dnnl\\_OIdhw16i32o4i | | +| dnnl\\_OIdhw16i48o4i | | +| dnnl\\_OIdhw16i64o4i | | +| dnnl\\_OIdhw16i32o2i | | +| dnnl\\_OIdhw16i48o2i | | +| dnnl\\_OIdhw16i64o2i | | +| dnnl\\_OwI16i16o2i | | +| dnnl\\_OwI16i16o4i | | +| dnnl\\_OhwI16i16o2i | | +| dnnl\\_OhwI16i16o4i | | +| dnnl\\_OdhwI16i16o2i | | +| dnnl\\_OdhwI16i16o4i | | +| dnnl\\_IwO16o16i2o | | +| dnnl\\_IwO16o16i4o | | +| dnnl\\_IhwO16o16i2o | | +| dnnl\\_IhwO16o16i4o | | +| dnnl\\_IdhwO16o16i2o | | +| dnnl\\_IdhwO16o16i4o | | +| dnnl\\_gOwI16i16o2i | | +| dnnl\\_gOwI16i16o4i | | +| dnnl\\_gOhwI16i16o2i | | +| dnnl\\_gOhwI16i16o4i | | +| dnnl\\_gOdhwI16i16o2i | | +| dnnl\\_gOdhwI16i16o4i | | +| dnnl\\_gIwO16o16i2o | | +| dnnl\\_gIwO16o16i4o | | +| dnnl\\_gIhwO16o16i2o | | +| dnnl\\_gIhwO16o16i4o | | +| dnnl\\_gIdhwO16o16i2o | | +| dnnl\\_gIdhwO16o16i4o | | +| dnnl\\_OwI16i32o2i | | +| dnnl\\_OwI16i32o4i | | +| dnnl\\_OwI16i48o2i | | +| dnnl\\_OwI16i48o4i | | +| dnnl\\_OwI16i64o2i | | +| 
dnnl\\_OwI16i64o4i | | +| dnnl\\_IwO16o32i2o | | +| dnnl\\_IwO16o32i4o | | +| dnnl\\_IwO16o48i2o | | +| dnnl\\_IwO16o48i4o | | +| dnnl\\_IwO16o64i2o | | +| dnnl\\_IwO16o64i4o | | +| dnnl\\_gOwI16i32o2i | | +| dnnl\\_gOwI16i32o4i | | +| dnnl\\_gOwI16i48o2i | | +| dnnl\\_gOwI16i48o4i | | +| dnnl\\_gOwI16i64o2i | | +| dnnl\\_gOwI16i64o4i | | +| dnnl\\_gIwO16o32i2o | | +| dnnl\\_gIwO16o32i4o | | +| dnnl\\_gIwO16o48i2o | | +| dnnl\\_gIwO16o48i4o | | +| dnnl\\_gIwO16o64i2o | | +| dnnl\\_gIwO16o64i4o | | +| dnnl\\_OhwI16i32o2i | | +| dnnl\\_OhwI16i32o4i | | +| dnnl\\_OhwI16i48o2i | | +| dnnl\\_OhwI16i48o4i | | +| dnnl\\_OhwI16i64o2i | | +| dnnl\\_OhwI16i64o4i | | +| dnnl\\_IhwO16o32i2o | | +| dnnl\\_IhwO16o32i4o | | +| dnnl\\_IhwO16o48i2o | | +| dnnl\\_IhwO16o48i4o | | +| dnnl\\_IhwO16o64i2o | | +| dnnl\\_IhwO16o64i4o | | +| dnnl\\_gOhwI16i32o2i | | +| dnnl\\_gOhwI16i32o4i | | +| dnnl\\_gOhwI16i48o2i | | +| dnnl\\_gOhwI16i48o4i | | +| dnnl\\_gOhwI16i64o2i | | +| dnnl\\_gOhwI16i64o4i | | +| dnnl\\_gIhwO16o32i2o | | +| dnnl\\_gIhwO16o32i4o | | +| dnnl\\_gIhwO16o48i2o | | +| dnnl\\_gIhwO16o48i4o | | +| dnnl\\_gIhwO16o64i2o | | +| dnnl\\_gIhwO16o64i4o | | +| dnnl\\_OdhwI16i32o2i | | +| dnnl\\_OdhwI16i32o4i | | +| dnnl\\_OdhwI16i48o2i | | +| dnnl\\_OdhwI16i48o4i | | +| dnnl\\_OdhwI16i64o2i | | +| dnnl\\_OdhwI16i64o4i | | +| dnnl\\_IdhwO16o32i2o | | +| dnnl\\_IdhwO16o32i4o | | +| dnnl\\_IdhwO16o48i2o | | +| dnnl\\_IdhwO16o48i4o | | +| dnnl\\_IdhwO16o64i2o | | +| dnnl\\_IdhwO16o64i4o | | +| dnnl\\_gOdhwI16i32o2i | | +| dnnl\\_gOdhwI16i32o4i | | +| dnnl\\_gOdhwI16i48o2i | | +| dnnl\\_gOdhwI16i48o4i | | +| dnnl\\_gOdhwI16i64o2i | | +| dnnl\\_gOdhwI16i64o4i | | +| dnnl\\_gIdhwO16o32i2o | | +| dnnl\\_gIdhwO16o32i4o | | +| dnnl\\_gIdhwO16o48i2o | | +| dnnl\\_gIdhwO16o48i4o | | +| dnnl\\_gIdhwO16o64i2o | | +| dnnl\\_gIdhwO16o64i4o | | +| dnnl\\_hwioG16g | | +| dnnl\\_hwioG8g | | +| dnnl\\_dhwioG16g | | +| dnnl\\_dhwioG8g | | +| dnnl\\_NCdhw40n16c | | +| dnnl\\_NCw40n16c | | +| 
dnnl\\_NChw40n16c | | +| dnnl\\_NCw40n32c | | +| dnnl\\_NChw40n32c | | +| dnnl\\_NCdhw40n32c | | +| dnnl\\_OIdhw4o8i8o2i | | +| dnnl\\_OIhw4o8i8o2i | | +| dnnl\\_OIw4o8i8o2i | | +| dnnl\\_gOIdhw4o8i8o2i | | +| dnnl\\_gOIhw4o8i8o2i | | +| dnnl\\_gOIw4o8i8o2i | | +| dnnl\\_IOdhw4i8o8i2o | | +| dnnl\\_IOhw4i8o8i2o | | +| dnnl\\_IOw4i8o8i2o | | +| dnnl\\_gIOdhw4i8o8i2o | | +| dnnl\\_gIOhw4i8o8i2o | | +| dnnl\\_gIOw4i8o8i2o | | +| dnnl\\_NCw2c32n8c | | +| dnnl\\_NChw2c32n8c | | +| dnnl\\_NCdhw2c32n8c | | +| dnnl\\_OIw2i8o16i4o | | +| dnnl\\_OIhw2i8o16i4o | | +| dnnl\\_OIdhw2i8o16i4o | | +| dnnl\\_OIw2o8i16o4i | | +| dnnl\\_OIw2o8i16o2i | | +| dnnl\\_IOw2i8o16i4o | | +| dnnl\\_IOw2i8o16i2o | | +| dnnl\\_OIhw2o8i16o4i | | +| dnnl\\_OIhw2o8i16o2i | | +| dnnl\\_IOhw2i8o16i4o | | +| dnnl\\_IOhw2i8o16i2o | | +| dnnl\\_OIdhw2o8i16o4i | | +| dnnl\\_OIdhw2o8i16o2i | | +| dnnl\\_IOdhw2i8o16i4o | | +| dnnl\\_IOdhw2i8o16i2o | | +| dnnl\\_gOIw2o8i16o2i | | +| dnnl\\_gIOw2i8o16i2o | | +| dnnl\\_gIOhw2i8o16i2o | | +| dnnl\\_gIOdhw2i8o16i2o | | +| dnnl\\_gOIhw2o8i16o2i | | +| dnnl\\_gOIdhw2o8i16o2i | | +| dnnl\\_gOIw2o8i16o4i | | +| dnnl\\_gOIhw2o8i16o4i | | +# See also +dev_guide_understanding_memory_formats +""" +@cenum dnnl_format_tag_t::UInt32 begin + dnnl_format_tag_undef = 0 + dnnl_format_tag_any = 1 + dnnl_a = 2 + dnnl_ab = 3 + dnnl_abc = 4 + dnnl_abcd = 5 + dnnl_abcde = 6 + dnnl_abcdef = 7 + dnnl_abcdefg = 8 + dnnl_abcdefgh = 9 + dnnl_abcdefghi = 10 + dnnl_abcdefghij = 11 + dnnl_abcdefghijk = 12 + dnnl_abcdefghijkl = 13 + dnnl_ba = 14 + dnnl_acb = 15 + dnnl_bac = 16 + dnnl_bca = 17 + dnnl_cab = 18 + dnnl_cba = 19 + dnnl_abdc = 20 + dnnl_acbd = 21 + dnnl_acdb = 22 + dnnl_adbc = 23 + dnnl_adcb = 24 + dnnl_bacd = 25 + dnnl_bcda = 26 + dnnl_cdab = 27 + dnnl_cdba = 28 + dnnl_dcab = 29 + dnnl_abced = 30 + dnnl_abdec = 31 + dnnl_acbde = 32 + dnnl_acdeb = 33 + dnnl_adecb = 34 + dnnl_bacde = 35 + dnnl_bcdea = 36 + dnnl_cdeab = 37 + dnnl_cdeba = 38 + dnnl_decab = 39 + dnnl_abcdfe = 40 + 
dnnl_abdefc = 41 + dnnl_abdfce = 42 + dnnl_acbdef = 43 + dnnl_adefcb = 44 + dnnl_defcab = 45 + dnnl_abcdegf = 46 + dnnl_abcdefhg = 47 + dnnl_abcdefgih = 48 + dnnl_abcdefghji = 49 + dnnl_abcdefghikj = 50 + dnnl_abcdefghijlk = 51 + dnnl_Abc16a = 52 + dnnl_ABc16a16b = 53 + dnnl_ABc32a32b = 54 + dnnl_ABc4a4b = 55 + dnnl_aBc16b = 56 + dnnl_ABc16b16a = 57 + dnnl_Abc4a = 58 + dnnl_aBc32b = 59 + dnnl_aBc4b = 60 + dnnl_ABc4b16a4b = 61 + dnnl_ABc2b8a4b = 62 + dnnl_ABc16b16a4b = 63 + dnnl_ABc16b16a2b = 64 + dnnl_ABc4b4a = 65 + dnnl_ABc8a16b2a = 66 + dnnl_ABc8a8b = 67 + dnnl_ABc8a4b = 68 + dnnl_aBc8b = 69 + dnnl_ABc8b16a2b = 70 + dnnl_BAc8a16b2a = 71 + dnnl_ABc8b8a = 72 + dnnl_Abcd16a = 73 + dnnl_Abcd8a = 74 + dnnl_ABcd16a16b = 75 + dnnl_Abcd32a = 76 + dnnl_ABcd32a32b = 77 + dnnl_aBcd16b = 78 + dnnl_ABcd16b16a = 79 + dnnl_aBCd16b16c = 80 + dnnl_aBCd16c16b = 81 + dnnl_Abcd4a = 82 + dnnl_aBcd32b = 83 + dnnl_aBcd4b = 84 + dnnl_ABcd4b16a4b = 85 + dnnl_ABcd16b16a4b = 86 + dnnl_ABcd16b16a2b = 87 + dnnl_ABcd4b4a = 88 + dnnl_ABcd4a4b = 89 + dnnl_aBCd2c4b2c = 90 + dnnl_aBCd4b8c2b = 91 + dnnl_aBCd4c16b4c = 92 + dnnl_aBCd2c8b4c = 93 + dnnl_aBCd16c16b4c = 94 + dnnl_aBCd16c16b2c = 95 + dnnl_aBCd4c4b = 96 + dnnl_aBCd4b4c = 97 + dnnl_ABcd8a16b2a = 98 + dnnl_ABcd2b8a4b = 99 + dnnl_ABcd8a8b = 100 + dnnl_ABcd8a4b = 101 + dnnl_aBcd8b = 102 + dnnl_aBCd4c8b2c = 103 + dnnl_ABcd8b16a2b = 104 + dnnl_aBCd8b16c2b = 105 + dnnl_BAcd8a16b2a = 106 + dnnl_ABcd8b8a = 107 + dnnl_aBCd8b8c = 108 + dnnl_aBCd8b4c = 109 + dnnl_aBCd8c16b2c = 110 + dnnl_ABcde8a16b2a = 111 + dnnl_aCBd8b16c2b = 112 + dnnl_aBCd8c8b = 113 + dnnl_Abcde16a = 114 + dnnl_Abcde32a = 115 + dnnl_ABcde16a16b = 116 + dnnl_BAcde8a16b2a = 117 + dnnl_aBCd2b4c2b = 118 + dnnl_ABcde4b16a4b = 119 + dnnl_ABcde2b8a4b = 120 + dnnl_aBcde16b = 121 + dnnl_ABcde16b16a = 122 + dnnl_aBCde16b16c = 123 + dnnl_aBCde16c16b = 124 + dnnl_aBCde2c8b4c = 125 + dnnl_Abcde4a = 126 + dnnl_aBcde32b = 127 + dnnl_aBcde4b = 128 + dnnl_ABcde4b4a = 129 + dnnl_ABcde4a4b = 130 + 
dnnl_aBCde4b4c = 131 + dnnl_aBCde2c4b2c = 132 + dnnl_aBCde4b8c2b = 133 + dnnl_aBCde4c16b4c = 134 + dnnl_aBCde16c16b4c = 135 + dnnl_aBCde16c16b2c = 136 + dnnl_aBCde4c4b = 137 + dnnl_Abcde8a = 138 + dnnl_ABcde8a8b = 139 + dnnl_ABcde8a4b = 140 + dnnl_BAcde16b16a = 141 + dnnl_aBcde8b = 142 + dnnl_ABcde8b16a2b = 143 + dnnl_aBCde8b16c2b = 144 + dnnl_aBCde4c8b2c = 145 + dnnl_aCBde8b16c2b = 146 + dnnl_ABcde8b8a = 147 + dnnl_ABcde32a32b = 148 + dnnl_aBCde8b8c = 149 + dnnl_aBCde8b4c = 150 + dnnl_ABc4a8b8a4b = 151 + dnnl_ABcd4a8b8a4b = 152 + dnnl_ABcde4a8b8a4b = 153 + dnnl_BAc4b8a8b4a = 154 + dnnl_BAcd4b8a8b4a = 155 + dnnl_BAcde4b8a8b4a = 156 + dnnl_ABcd2a8b8a2b = 157 + dnnl_aBCd4b8c8b4c = 158 + dnnl_aBCde4b8c8b4c = 159 + dnnl_aBCde2b8c8b2c = 160 + dnnl_aBCde8c16b2c = 161 + dnnl_aBCde8c8b = 162 + dnnl_aBCde2b4c2b = 163 + dnnl_aBcdef16b = 164 + dnnl_aBCdef16b16c = 165 + dnnl_aBCdef16c16b = 166 + dnnl_aBCdef4c16b4c = 167 + dnnl_aBCdef2c8b4c = 168 + dnnl_aBCdef4c8b2c = 169 + dnnl_aBCdef2b4c2b = 170 + dnnl_aBcdef4b = 171 + dnnl_aBCdef4c4b = 172 + dnnl_aBCdef4b4c = 173 + dnnl_aBCdef2c4b2c = 174 + dnnl_aBCdef4b8c2b = 175 + dnnl_aBCdef8b8c = 176 + dnnl_aBCdef8b4c = 177 + dnnl_aBCdef8c16b2c = 178 + dnnl_aBCdef4b8c8b4c = 179 + dnnl_aBCdef8b16c2b = 180 + dnnl_aCBdef8b16c2b = 181 + dnnl_aBCdef8c8b = 182 + dnnl_aBdc16b = 183 + dnnl_aBdC16b2c = 184 + dnnl_aBdC16b4c = 185 + dnnl_aBdc4b = 186 + dnnl_aBdc8b = 187 + dnnl_aBdec16b = 188 + dnnl_aBdeC16b2c = 189 + dnnl_aBdeC16b4c = 190 + dnnl_aBdec32b = 191 + dnnl_aBdec4b = 192 + dnnl_aBdec8b = 193 + dnnl_aBdefc16b = 194 + dnnl_aBdefC16b2c = 195 + dnnl_aCBdef16c16b = 196 + dnnl_aBdefc4b = 197 + dnnl_aBdefc8b = 198 + dnnl_Abcdef16a = 199 + dnnl_Abcdef32a = 200 + dnnl_aBedc16b = 201 + dnnl_Acb16a = 202 + dnnl_AcB16a2b = 203 + dnnl_AcB16a4b = 204 + dnnl_Acb4a = 205 + dnnl_Acb8a = 206 + dnnl_aCBd16b16c = 207 + dnnl_aCBd16c16b = 208 + dnnl_aCBde16b16c = 209 + dnnl_aCBde16c16b = 210 + dnnl_Acdb16a = 211 + dnnl_AcdB16a2b = 212 + dnnl_AcdB16a4b = 213 + 
dnnl_Acdb32a = 214 + dnnl_Acdb4a = 215 + dnnl_Acdb8a = 216 + dnnl_Acdeb16a = 217 + dnnl_AcdeB16a2b = 218 + dnnl_Acdeb4a = 219 + dnnl_Acdeb8a = 220 + dnnl_Adcb16a = 221 + dnnl_BAc16a16b = 222 + dnnl_BAc16b16a = 223 + dnnl_BAcd16a16b = 224 + dnnl_BAcd16b16a = 225 + dnnl_aCBd4c8b8c4b = 226 + dnnl_aCBde4c8b8c4b = 227 + dnnl_aCBdef4c8b8c4b = 228 + dnnl_BAcde16a16b = 229 + dnnl_aCBdef16b16c = 230 + dnnl_ABc16b32a = 231 + dnnl_ABc16b64a = 232 + dnnl_ABc4b32a4b = 233 + dnnl_ABc4b64a4b = 234 + dnnl_ABc8b32a2b = 235 + dnnl_ABc8b64a2b = 236 + dnnl_AB16b16a = 237 + dnnl_AB16b32a = 238 + dnnl_AB16b64a = 239 + dnnl_AB8b16a2b = 240 + dnnl_AB8b32a2b = 241 + dnnl_AB8b64a2b = 242 + dnnl_AB4b16a4b = 243 + dnnl_AB4b32a4b = 244 + dnnl_AB4b64a4b = 245 + dnnl_AB16b16a4b = 246 + dnnl_ABcd16b32a = 247 + dnnl_ABcd16b64a = 248 + dnnl_ABcd4b32a4b = 249 + dnnl_ABcd4b64a4b = 250 + dnnl_ABcd8b32a2b = 251 + dnnl_ABcd8b64a2b = 252 + dnnl_ABcde4b32a4b = 253 + dnnl_ABcde4b64a4b = 254 + dnnl_ABcde16b16a4b = 255 + dnnl_ABcde16b16a2b = 256 + dnnl_ABcde16b32a = 257 + dnnl_ABcde16b64a = 258 + dnnl_ABcde8b32a2b = 259 + dnnl_ABcde8b64a2b = 260 + dnnl_aBCdef16c16b4c = 261 + dnnl_aBCdef16c16b2c = 262 + dnnl_AB32a32b8a4b = 263 + dnnl_AB8a4b = 264 + dnnl_AB32a32b8a2b = 265 + dnnl_AB8a2b = 266 + dnnl_abDc32d = 267 + dnnl_abDC32d4c = 268 + dnnl_abdEc32e = 269 + dnnl_abdEC32e2c = 270 + dnnl_abdEC32e4c = 271 + dnnl_aBdefC16b4c = 272 + dnnl_AcdeB16a4b = 273 + dnnl_ABcd16a16b2a = 274 + dnnl_ABc16a16b2a = 275 + dnnl_aBCd16b16c2b = 276 + dnnl_aBCde16b16c2b = 277 + dnnl_Acb32a = 278 + dnnl_AcB32a2b = 279 + dnnl_AcB32a4b = 280 + dnnl_Acb48a = 281 + dnnl_AcB48a2b = 282 + dnnl_AcB48a4b = 283 + dnnl_Acb64a = 284 + dnnl_AcB64a2b = 285 + dnnl_AcB64a4b = 286 + dnnl_cBa2b = 287 + dnnl_cBa4b = 288 + dnnl_aBdc32b = 289 + dnnl_aBdC32b2c = 290 + dnnl_aBdC32b4c = 291 + dnnl_aBdc48b = 292 + dnnl_aBdC48b2c = 293 + dnnl_aBdC48b4c = 294 + dnnl_aBdc64b = 295 + dnnl_aBdC64b2c = 296 + dnnl_aBdC64b4c = 297 + dnnl_adCb2c = 298 + dnnl_adCb4c 
= 299 + dnnl_AcdB32a2b = 300 + dnnl_AcdB32a4b = 301 + dnnl_Acdb48a = 302 + dnnl_AcdB48a2b = 303 + dnnl_AcdB48a4b = 304 + dnnl_Acdb64a = 305 + dnnl_AcdB64a2b = 306 + dnnl_AcdB64a4b = 307 + dnnl_cdBa2b = 308 + dnnl_cdBa4b = 309 + dnnl_aBdeC32b2c = 310 + dnnl_aBdeC32b4c = 311 + dnnl_aBdec48b = 312 + dnnl_aBdeC48b2c = 313 + dnnl_aBdeC48b4c = 314 + dnnl_aBdec64b = 315 + dnnl_aBdeC64b2c = 316 + dnnl_aBdeC64b4c = 317 + dnnl_adeCb2c = 318 + dnnl_adeCb4c = 319 + dnnl_Acdeb32a = 320 + dnnl_AcdeB32a2b = 321 + dnnl_AcdeB32a4b = 322 + dnnl_Acdeb48a = 323 + dnnl_AcdeB48a2b = 324 + dnnl_AcdeB48a4b = 325 + dnnl_Acdeb64a = 326 + dnnl_AcdeB64a2b = 327 + dnnl_AcdeB64a4b = 328 + dnnl_cdeBa2b = 329 + dnnl_cdeBa4b = 330 + dnnl_aBdefc32b = 331 + dnnl_aBdefC32b2c = 332 + dnnl_aBdefC32b4c = 333 + dnnl_aBdefc48b = 334 + dnnl_aBdefC48b2c = 335 + dnnl_aBdefC48b4c = 336 + dnnl_aBdefc64b = 337 + dnnl_aBdefC64b2c = 338 + dnnl_aBdefC64b4c = 339 + dnnl_adefCb2c = 340 + dnnl_adefCb4c = 341 + dnnl_AB16b32a4b = 342 + dnnl_AB16b48a4b = 343 + dnnl_AB16b64a4b = 344 + dnnl_AB16b16a2b = 345 + dnnl_AB16b32a2b = 346 + dnnl_AB16b48a2b = 347 + dnnl_AB16b64a2b = 348 + dnnl_ABc16b32a4b = 349 + dnnl_ABc16b48a4b = 350 + dnnl_ABc16b64a4b = 351 + dnnl_ABc16b32a2b = 352 + dnnl_ABc16b48a2b = 353 + dnnl_ABc16b64a2b = 354 + dnnl_ABcd16b32a4b = 355 + dnnl_ABcd16b48a4b = 356 + dnnl_ABcd16b64a4b = 357 + dnnl_ABcd16b32a2b = 358 + dnnl_ABcd16b48a2b = 359 + dnnl_ABcd16b64a2b = 360 + dnnl_ABcde16b32a4b = 361 + dnnl_ABcde16b48a4b = 362 + dnnl_ABcde16b64a4b = 363 + dnnl_ABcde16b32a2b = 364 + dnnl_ABcde16b48a2b = 365 + dnnl_ABcde16b64a2b = 366 + dnnl_ABc32a16b = 367 + dnnl_ABcd32a16b = 368 + dnnl_ABcde32a16b = 369 + dnnl_AB48a16b = 370 + dnnl_AB48a32b = 371 + dnnl_ABc40a16b = 372 + dnnl_ABc40a32b = 373 + dnnl_aBC48b16c = 374 + dnnl_aBC48b32c = 375 + dnnl_ABcd40a16b = 376 + dnnl_ABcd40a32b = 377 + dnnl_abCd32c = 378 + dnnl_abdCe32c = 379 + dnnl_abdCE32c2e = 380 + dnnl_BA16a16b2a = 381 + dnnl_BA16a32b2a = 382 + dnnl_BA16a48b2a = 
383 + dnnl_BA16a64b2a = 384 + dnnl_BA16a16b4a = 385 + dnnl_BA16a32b4a = 386 + dnnl_BA16a48b4a = 387 + dnnl_BA16a64b4a = 388 + dnnl_ABcd8a2b = 389 + dnnl_aBdeC16c16b2c = 390 + dnnl_aBdeC16c16b4c = 391 + dnnl_aBdefC16c16b2c = 392 + dnnl_AcB16b16a2b = 393 + dnnl_AcB16b16a4b = 394 + dnnl_AcdB16b16a2b = 395 + dnnl_AcdB16b16a4b = 396 + dnnl_AcdeB16b16a2b = 397 + dnnl_aBdefC16c16b4c = 398 + dnnl_AcdeB16b16a4b = 399 + dnnl_AcB16b32a2b = 400 + dnnl_AcB16b32a4b = 401 + dnnl_AcB16b48a2b = 402 + dnnl_AcB16b48a4b = 403 + dnnl_AcB16b64a2b = 404 + dnnl_AcB16b64a4b = 405 + dnnl_aBdC16c16b2c = 406 + dnnl_aBdC16c16b4c = 407 + dnnl_aBdC16c32b2c = 408 + dnnl_aBdC16c32b4c = 409 + dnnl_aBdC16c48b2c = 410 + dnnl_aBdC16c48b4c = 411 + dnnl_aBdC16c64b2c = 412 + dnnl_aBdC16c64b4c = 413 + dnnl_AcdB16b32a2b = 414 + dnnl_AcdB16b32a4b = 415 + dnnl_AcdB16b48a2b = 416 + dnnl_AcdB16b48a4b = 417 + dnnl_AcdB16b64a2b = 418 + dnnl_AcdB16b64a4b = 419 + dnnl_aBdeC16c32b2c = 420 + dnnl_aBdeC16c32b4c = 421 + dnnl_aBdeC16c48b2c = 422 + dnnl_aBdeC16c48b4c = 423 + dnnl_aBdeC16c64b2c = 424 + dnnl_aBdeC16c64b4c = 425 + dnnl_AcdeB16b32a2b = 426 + dnnl_AcdeB16b32a4b = 427 + dnnl_AcdeB16b48a2b = 428 + dnnl_AcdeB16b48a4b = 429 + dnnl_AcdeB16b64a2b = 430 + dnnl_AcdeB16b64a4b = 431 + dnnl_aBdefC16c32b2c = 432 + dnnl_aBdefC16c32b4c = 433 + dnnl_aBdefC16c48b2c = 434 + dnnl_aBdefC16c48b4c = 435 + dnnl_aBdefC16c64b2c = 436 + dnnl_aBdefC16c64b4c = 437 + dnnl_decbA16a = 438 + dnnl_ABc4a2b = 439 + dnnl_ABc8a2b = 440 + dnnl_aBCd8b2c = 441 + dnnl_ABcde4a2b = 442 + dnnl_ABcde8a2b = 443 + dnnl_ABcde40a16b = 444 + dnnl_ABcde40a32b = 445 + dnnl_aBCde8b2c = 446 + dnnl_ABcde4a8b8a2b = 447 + dnnl_ABcd4a8b8a2b = 448 + dnnl_ABc4a8b8a2b = 449 + dnnl_aBCdef4b8c8b2c = 450 + dnnl_aBCde4b8c8b2c = 451 + dnnl_aBCd4b8c8b2c = 452 + dnnl_BAcde4b8a8b2a = 453 + dnnl_BAcd4b8a8b2a = 454 + dnnl_BAc4b8a8b2a = 455 + dnnl_aCBdef4c8b8c2b = 456 + dnnl_aCBde4c8b8c2b = 457 + dnnl_aCBd4c8b8c2b = 458 + dnnl_aBCdef8b2c = 459 + dnnl_AB32a16b = 460 + 
dnnl_AB32a32b = 461 + dnnl_BA4b8a8b2a = 462 + dnnl_BA4b8a8b4a = 463 + dnnl_aBC32b16c = 464 + dnnl_aBC32b32c = 465 + dnnl_aCB4c8b8c2b = 466 + dnnl_aCB4c8b8c4b = 467 + dnnl_ABcd4a2b = 468 + dnnl_ABc2b8a16b4a = 469 + dnnl_ABcd2b8a16b4a = 470 + dnnl_ABcde2b8a16b4a = 471 + dnnl_ABc2a8b16a4b = 472 + dnnl_ABc2a8b16a2b = 473 + dnnl_ABc2b32a8b = 474 + dnnl_ABcd2a8b16a4b = 475 + dnnl_ABcd2a8b16a2b = 476 + dnnl_aCBd2c8b16c2b = 477 + dnnl_ABcd2b32a8b = 478 + dnnl_aBCd2c8b16c2b = 479 + dnnl_ABcde2a8b16a4b = 480 + dnnl_ABcde2a8b16a2b = 481 + dnnl_aCBde2c8b16c2b = 482 + dnnl_ABcde2b32a8b = 483 + dnnl_aBC2b8c16b2c = 484 + dnnl_aBCd2b8c16b2c = 485 + dnnl_aBCde2b8c16b2c = 486 + dnnl_aBCdef2b8c16b2c = 487 + dnnl_BAcde2b8a16b4a = 488 + dnnl_BAcd2b8a16b4a = 489 + dnnl_BAc2b8a16b4a = 490 + dnnl_BAcde2b8a16b2a = 491 + dnnl_BAcd2b8a16b2a = 492 + dnnl_BAc2b8a16b2a = 493 + dnnl_aBCde2c8b16c2b = 494 + dnnl_aBCdef2c8b16c2b = 495 + dnnl_aCBdef2c8b16c2b = 496 + dnnl_aBCd2b8c16b4c = 497 + dnnl_aBCde2b8c16b4c = 498 + dnnl_BA4b8a16b2a = 499 + dnnl_BA4b8a16b4a = 500 + dnnl_aCB4c8b16c2b = 501 + dnnl_aCB4c8b16c4b = 502 + dnnl_BA16a16b = 503 + dnnl_BA16a32b = 504 + dnnl_BA16a48b = 505 + dnnl_BA16a64b = 506 + dnnl_aCB16c2b = 507 + dnnl_aCB16c4b = 508 + dnnl_BA16b2a = 509 + dnnl_BA16b4a = 510 + dnnl_aBC16b16c = 511 + dnnl_aBC16b32c = 512 + dnnl_AB16a16b = 513 + dnnl_AB16a32b = 514 + dnnl_ABcde16a16b2a = 515 + dnnl_aBCdef16b16c2b = 516 + dnnl_Acedb16a = 517 + dnnl_aBdfec16b = 518 + dnnl_abdEC64e2c = 519 + dnnl_abdEC64e4c = 520 + dnnl_aCB16b16c = 521 + dnnl_aCB16b32c = 522 + dnnl_aCB16b48c = 523 + dnnl_aCB16b64c = 524 + dnnl_aCB16b16c2b = 525 + dnnl_aCB16b32c2b = 526 + dnnl_aCB16b48c2b = 527 + dnnl_aCB16b64c2b = 528 + dnnl_aCB16b16c4b = 529 + dnnl_aCB16b32c4b = 530 + dnnl_aCB16b48c4b = 531 + dnnl_aCB16b64c4b = 532 + dnnl_abCd4c = 533 + dnnl_abCde4c = 534 + dnnl_abCdef4c = 535 + dnnl_abCde32c = 536 + dnnl_abCdef32c = 537 + dnnl_ABcd16a32b = 538 + dnnl_decbA8a = 539 + dnnl_aCdefB16b32c2b = 540 + 
dnnl_aCdefB16b32c4b = 541 + dnnl_aCdefB16b48c2b = 542 + dnnl_aCdefB16b48c4b = 543 + dnnl_aCdefB16b64c2b = 544 + dnnl_aCdefB16b64c4b = 545 + dnnl_BcdeA16a32b2a = 546 + dnnl_BcdeA16a32b4a = 547 + dnnl_BcdeA16a48b2a = 548 + dnnl_BcdeA16a48b4a = 549 + dnnl_BcdeA16a64b2a = 550 + dnnl_BcdeA16a64b4a = 551 + dnnl_aCdefb32c = 552 + dnnl_aCdefB32c2b = 553 + dnnl_aCdefB32c4b = 554 + dnnl_aCdefb48c = 555 + dnnl_aCdefB48c2b = 556 + dnnl_aCdefB48c4b = 557 + dnnl_aCdefb64c = 558 + dnnl_aCdefB64c2b = 559 + dnnl_aCdefB64c4b = 560 + dnnl_Bcdea32b = 561 + dnnl_BcdeA32b2a = 562 + dnnl_BcdeA32b4a = 563 + dnnl_Bcdea48b = 564 + dnnl_BcdeA48b2a = 565 + dnnl_BcdeA48b4a = 566 + dnnl_Bcdea64b = 567 + dnnl_BcdeA64b2a = 568 + dnnl_BcdeA64b4a = 569 + dnnl_Bca32b = 570 + dnnl_BcA32b2a = 571 + dnnl_BcA32b4a = 572 + dnnl_Bca48b = 573 + dnnl_BcA48b2a = 574 + dnnl_BcA48b4a = 575 + dnnl_Bca64b = 576 + dnnl_BcA64b2a = 577 + dnnl_BcA64b4a = 578 + dnnl_aCdb32c = 579 + dnnl_aCdB32c2b = 580 + dnnl_aCdB32c4b = 581 + dnnl_aCdb48c = 582 + dnnl_aCdB48c2b = 583 + dnnl_aCdB48c4b = 584 + dnnl_aCdb64c = 585 + dnnl_aCdB64c2b = 586 + dnnl_aCdB64c4b = 587 + dnnl_BcA16a16b2a = 588 + dnnl_BcA16a16b4a = 589 + dnnl_BcdA16a16b2a = 590 + dnnl_BcdA16a16b4a = 591 + dnnl_BcdeA16a16b2a = 592 + dnnl_BcdeA16a16b4a = 593 + dnnl_aCdB16b16c2b = 594 + dnnl_aCdB16b16c4b = 595 + dnnl_aCdeB16b16c2b = 596 + dnnl_aCdeB16b16c4b = 597 + dnnl_aCdefB16b16c2b = 598 + dnnl_aCdefB16b16c4b = 599 + dnnl_BcA16a32b2a = 600 + dnnl_BcA16a32b4a = 601 + dnnl_BcA16a48b2a = 602 + dnnl_BcA16a48b4a = 603 + dnnl_BcA16a64b2a = 604 + dnnl_BcA16a64b4a = 605 + dnnl_aCdB16b32c2b = 606 + dnnl_aCdB16b32c4b = 607 + dnnl_aCdB16b48c2b = 608 + dnnl_aCdB16b48c4b = 609 + dnnl_aCdB16b64c2b = 610 + dnnl_aCdB16b64c4b = 611 + dnnl_BcdA16a32b2a = 612 + dnnl_BcdA16a32b4a = 613 + dnnl_BcdA16a48b2a = 614 + dnnl_BcdA16a48b4a = 615 + dnnl_BcdA16a64b2a = 616 + dnnl_BcdA16a64b4a = 617 + dnnl_aCdeB16b32c2b = 618 + dnnl_aCdeB16b32c4b = 619 + dnnl_aCdeB16b48c2b = 620 + 
dnnl_aCdeB16b48c4b = 621 + dnnl_aCdeB16b64c2b = 622 + dnnl_aCdeB16b64c4b = 623 + dnnl_Bca16b = 624 + dnnl_BcA16b2a = 625 + dnnl_BcA16b4a = 626 + dnnl_Bcda16b = 627 + dnnl_BcdA16b2a = 628 + dnnl_BcdA16b4a = 629 + dnnl_Bcdea16b = 630 + dnnl_BcdeA16b2a = 631 + dnnl_BcdeA16b4a = 632 + dnnl_aCdb16c = 633 + dnnl_aCdB16c2b = 634 + dnnl_aCdB16c4b = 635 + dnnl_aCdeb16c = 636 + dnnl_aCdeB16c2b = 637 + dnnl_aCdeB16c4b = 638 + dnnl_aCdefb16c = 639 + dnnl_aCdefB16c2b = 640 + dnnl_aCdefB16c4b = 641 + dnnl_Bcda32b = 642 + dnnl_BcdA32b2a = 643 + dnnl_BcdA32b4a = 644 + dnnl_Bcda48b = 645 + dnnl_BcdA48b2a = 646 + dnnl_BcdA48b4a = 647 + dnnl_Bcda64b = 648 + dnnl_BcdA64b2a = 649 + dnnl_BcdA64b4a = 650 + dnnl_aCdeb32c = 651 + dnnl_aCdeB32c2b = 652 + dnnl_aCdeB32c4b = 653 + dnnl_aCdeb48c = 654 + dnnl_aCdeB48c2b = 655 + dnnl_aCdeB48c4b = 656 + dnnl_aCdeb64c = 657 + dnnl_aCdeB64c2b = 658 + dnnl_aCdeB64c4b = 659 + dnnl_Acb24a = 660 + dnnl_Acdb24a = 661 + dnnl_Acdeb24a = 662 + dnnl_aBdc24b = 663 + dnnl_aBdec24b = 664 + dnnl_aBdefc24b = 665 + dnnl_abDc16d = 666 + dnnl_abdEc16e = 667 + dnnl_abdCe16c = 668 + dnnl_AcB24a2b = 669 + dnnl_AcdB24a2b = 670 + dnnl_AcdeB24a2b = 671 + dnnl_aBdC24b2c = 672 + dnnl_aBdeC24b2c = 673 + dnnl_aBdefC24b2c = 674 + dnnl_AcB8a2b = 675 + dnnl_AcdB8a2b = 676 + dnnl_AcdeB8a2b = 677 + dnnl_aBdC8b2c = 678 + dnnl_aBdeC8b2c = 679 + dnnl_aBdefC8b2c = 680 + dnnl_AB8b32a = 681 + dnnl_ABc8b32a = 682 + dnnl_ABcd8b32a = 683 + dnnl_ABcde8b32a = 684 + dnnl_AB8b24a = 685 + dnnl_ABc8b24a = 686 + dnnl_ABcd8b24a = 687 + dnnl_ABcde8b24a = 688 + dnnl_AB8b16a = 689 + dnnl_ABc8b16a = 690 + dnnl_ABcd8b16a = 691 + dnnl_ABcde8b16a = 692 + dnnl_AB8b8a = 693 + dnnl_AB4b8a4b = 694 + dnnl_AB4b24a4b = 695 + dnnl_ABc4b8a4b = 696 + dnnl_ABc4b24a4b = 697 + dnnl_ABcd4b8a4b = 698 + dnnl_ABcd4b24a4b = 699 + dnnl_ABcde4b8a4b = 700 + dnnl_ABcde4b24a4b = 701 + dnnl_AB8b24a2b = 702 + dnnl_ABc8b24a2b = 703 + dnnl_ABcd8b24a2b = 704 + dnnl_ABcde8b24a2b = 705 + dnnl_AB8b8a2b = 706 + dnnl_ABc8b8a2b = 707 + 
dnnl_ABcd8b8a2b = 708 + dnnl_ABcde8b8a2b = 709 + dnnl_AcB24a4b = 710 + dnnl_AcdB24a4b = 711 + dnnl_AcdeB24a4b = 712 + dnnl_aBdC24b4c = 713 + dnnl_aBdeC24b4c = 714 + dnnl_aBdefC24b4c = 715 + dnnl_AcB8a4b = 716 + dnnl_AcdB8a4b = 717 + dnnl_AcdeB8a4b = 718 + dnnl_aBdC8b4c = 719 + dnnl_aBdeC8b4c = 720 + dnnl_aBdefC8b4c = 721 + dnnl_Bca8b = 722 + dnnl_BcA8b2a = 723 + dnnl_Bcda8b = 724 + dnnl_BcdA8b2a = 725 + dnnl_Bcdea8b = 726 + dnnl_BcdeA8b2a = 727 + dnnl_aCdb8c = 728 + dnnl_aCdB8c2b = 729 + dnnl_aCdeb8c = 730 + dnnl_aCdeB8c2b = 731 + dnnl_aCdefb8c = 732 + dnnl_aCdefB8c2b = 733 + dnnl_Bca24b = 734 + dnnl_BcA24b2a = 735 + dnnl_Bcda24b = 736 + dnnl_BcdA24b2a = 737 + dnnl_Bcdea24b = 738 + dnnl_BcdeA24b2a = 739 + dnnl_aCdb24c = 740 + dnnl_aCdB24c2b = 741 + dnnl_aCdeb24c = 742 + dnnl_aCdeB24c2b = 743 + dnnl_aCdefb24c = 744 + dnnl_aCdefB24c2b = 745 + dnnl_BcA8b4a = 746 + dnnl_BcdA8b4a = 747 + dnnl_BcdeA8b4a = 748 + dnnl_aCdB8c4b = 749 + dnnl_aCdeB8c4b = 750 + dnnl_aCdefB8c4b = 751 + dnnl_BcA24b4a = 752 + dnnl_BcdA24b4a = 753 + dnnl_BcdeA24b4a = 754 + dnnl_aCdB24c4b = 755 + dnnl_aCdeB24c4b = 756 + dnnl_aCdefB24c4b = 757 + dnnl_AB16b48a = 758 + dnnl_ABc16b48a = 759 + dnnl_ABcd16b48a = 760 + dnnl_ABcde16b48a = 761 + dnnl_ABc16a4b = 762 + dnnl_ABcd16a4b = 763 + dnnl_ABcde16a4b = 764 + dnnl_defcbA16a = 765 + dnnl_defcbA8a = 766 + dnnl_AcB16b64a = 767 + dnnl_AcdB16b64a = 768 + dnnl_AcdeB16b64a = 769 + dnnl_AcB16b48a = 770 + dnnl_AcdB16b48a = 771 + dnnl_AcdeB16b48a = 772 + dnnl_AcB16b32a = 773 + dnnl_AcdB16b32a = 774 + dnnl_AcdeB16b32a = 775 + dnnl_AcB16b16a = 776 + dnnl_AcdB16b16a = 777 + dnnl_AcdeB16b16a = 778 + dnnl_AcB8b32a = 779 + dnnl_AcdB8b32a = 780 + dnnl_AcdeB8b32a = 781 + dnnl_AcB8b24a = 782 + dnnl_AcdB8b24a = 783 + dnnl_AcdeB8b24a = 784 + dnnl_AcB8b16a = 785 + dnnl_AcdB8b16a = 786 + dnnl_AcdeB8b16a = 787 + dnnl_AcB8b8a = 788 + dnnl_AcdB8b8a = 789 + dnnl_AcdeB8b8a = 790 + dnnl_AcB8b64a2b = 791 + dnnl_AcdB8b64a2b = 792 + dnnl_AcdeB8b64a2b = 793 + dnnl_AcB8b32a2b = 794 + 
dnnl_AcdB8b32a2b = 795 + dnnl_AcdeB8b32a2b = 796 + dnnl_AcB8b24a2b = 797 + dnnl_AcdB8b24a2b = 798 + dnnl_AcdeB8b24a2b = 799 + dnnl_AcB8b16a2b = 800 + dnnl_AcdB8b16a2b = 801 + dnnl_AcdeB8b16a2b = 802 + dnnl_AcB8b8a2b = 803 + dnnl_AcdB8b8a2b = 804 + dnnl_AcdeB8b8a2b = 805 + dnnl_AcB4b64a4b = 806 + dnnl_AcdB4b64a4b = 807 + dnnl_AcdeB4b64a4b = 808 + dnnl_AcB4b32a4b = 809 + dnnl_AcdB4b32a4b = 810 + dnnl_AcdeB4b32a4b = 811 + dnnl_AcB4b24a4b = 812 + dnnl_AcdB4b24a4b = 813 + dnnl_AcdeB4b24a4b = 814 + dnnl_AcB4b16a4b = 815 + dnnl_AcdB4b16a4b = 816 + dnnl_AcdeB4b16a4b = 817 + dnnl_AcB4b8a4b = 818 + dnnl_AcdB4b8a4b = 819 + dnnl_AcdeB4b8a4b = 820 + dnnl_Ab4a = 821 + dnnl_Ab8a = 822 + dnnl_BA4b4a = 823 + dnnl_BA8b4a = 824 + dnnl_BA2a24b = 825 + dnnl_aCB2b24c = 826 + dnnl_BA2a8b = 827 + dnnl_aCB2b8c = 828 + dnnl_BA8a24b = 829 + dnnl_aCB8b24c = 830 + dnnl_BA8a16b = 831 + dnnl_aCB8b16c = 832 + dnnl_BA8a8b = 833 + dnnl_aCB8b8c = 834 + dnnl_bcad = 835 + dnnl_cabd = 836 + dnnl_dabc = 837 + dnnl_format_tag_last = 838 + dnnl_x = 2 + dnnl_nc = 3 + dnnl_cn = 14 + dnnl_tn = 3 + dnnl_nt = 14 + dnnl_ncw = 4 + dnnl_nwc = 15 + dnnl_nchw = 5 + dnnl_nhwc = 22 + dnnl_chwn = 26 + dnnl_ncdhw = 6 + dnnl_ndhwc = 33 + dnnl_oi = 3 + dnnl_io = 14 + dnnl_oiw = 4 + dnnl_owi = 15 + dnnl_wio = 19 + dnnl_woi = 18 + dnnl_iwo = 17 + dnnl_oihw = 5 + dnnl_hwio = 28 + dnnl_hwoi = 27 + dnnl_ohwi = 22 + dnnl_ihwo = 26 + dnnl_iohw = 25 + dnnl_oidhw = 6 + dnnl_iodhw = 35 + dnnl_dhwio = 38 + dnnl_dhwoi = 37 + dnnl_odhwi = 33 + dnnl_idhwo = 36 + dnnl_goiw = 5 + dnnl_gowi = 20 + dnnl_wigo = 29 + dnnl_goihw = 6 + dnnl_gohwi = 31 + dnnl_hwigo = 39 + dnnl_giohw = 32 + dnnl_goidhw = 7 + dnnl_godhwi = 41 + dnnl_giodhw = 43 + dnnl_dhwigo = 45 + dnnl_tnc = 4 + dnnl_ntc = 16 + dnnl_ldnc = 5 + dnnl_ldigo = 6 + dnnl_ldgoi = 31 + dnnl_ldio = 5 + dnnl_ldoi = 20 + dnnl_ldgo = 5 + dnnl_ldOi16o = 666 + dnnl_ldOi32o = 267 + dnnl_ldOI32o4i = 268 + dnnl_ldIo32i = 378 + dnnl_ldgOi16o = 667 + dnnl_ldgOi32o = 269 + dnnl_ldgOI32o2i = 270 + 
dnnl_ldgOI32o4i = 271 + dnnl_ldgOI64o2i = 519 + dnnl_ldgOI64o4i = 520 + dnnl_ldgIo16i = 668 + dnnl_ldgIo32i = 379 + dnnl_ldgIO32i2o = 380 + dnnl_nCdhw32c = 127 + dnnl_nCdhw16c = 121 + dnnl_nCdhw4c = 128 + dnnl_nCdhw8c = 142 + dnnl_nChw32c = 83 + dnnl_nChw16c = 78 + dnnl_nChw4c = 84 + dnnl_nChw8c = 102 + dnnl_nCw32c = 59 + dnnl_nCw16c = 56 + dnnl_nCw4c = 60 + dnnl_nCw8c = 69 + dnnl_NCw16n16c = 53 + dnnl_NCdhw16n16c = 116 + dnnl_NChw16n16c = 75 + dnnl_NCw32n16c = 367 + dnnl_NChw32n16c = 368 + dnnl_NChw16n32c = 538 + dnnl_NCdhw32n16c = 369 + dnnl_NCw32n32c = 54 + dnnl_NChw32n32c = 77 + dnnl_NCdhw32n32c = 148 + dnnl_OI16i16o = 237 + dnnl_OI16i32o = 238 + dnnl_OI16i48o = 758 + dnnl_OI16i64o = 239 + dnnl_OI8i8o2i = 706 + dnnl_OI8i16o2i = 240 + dnnl_OI8i24o2i = 702 + dnnl_OI8i32o2i = 241 + dnnl_OI8i64o2i = 242 + dnnl_OI4i8o4i = 694 + dnnl_OI4i16o4i = 243 + dnnl_OI4i24o4i = 695 + dnnl_OI4i32o4i = 244 + dnnl_OI4i64o4i = 245 + dnnl_OI16i16o4i = 246 + dnnl_OI8i32o = 681 + dnnl_OI8i24o = 685 + dnnl_OI8i16o = 689 + dnnl_OI8i8o = 693 + dnnl_IOw16o16i = 222 + dnnl_IOw16i16o = 223 + dnnl_OIw16i16o = 57 + dnnl_OwI16i16o = 776 + dnnl_OIw16i32o = 231 + dnnl_OwI16i32o = 773 + dnnl_OIw16i48o = 759 + dnnl_OwI16i48o = 770 + dnnl_OIw16i64o = 232 + dnnl_OwI16i64o = 767 + dnnl_OIw16o16i = 53 + dnnl_Oiw16o = 52 + dnnl_OIw4i8o4i = 696 + dnnl_OwI4i8o4i = 818 + dnnl_OIw4i16o4i = 61 + dnnl_OwI4i16o4i = 815 + dnnl_OIw4i24o4i = 697 + dnnl_OwI4i24o4i = 812 + dnnl_OIw4i32o4i = 233 + dnnl_OwI4i32o4i = 809 + dnnl_OIw4i64o4i = 234 + dnnl_OwI4i64o4i = 806 + dnnl_OIw2i8o4i = 62 + dnnl_OIw16i16o4i = 63 + dnnl_OIw16i16o2i = 64 + dnnl_OIw16o16i2o = 275 + dnnl_OIw4i4o = 65 + dnnl_OIw4o4i = 55 + dnnl_Oiw4o = 58 + dnnl_OIw8i8o2i = 707 + dnnl_OwI8i8o2i = 803 + dnnl_OIw8i16o2i = 70 + dnnl_OwI8i16o2i = 800 + dnnl_OIw8i24o2i = 703 + dnnl_OwI8i24o2i = 797 + dnnl_OIw8i32o2i = 235 + dnnl_OwI8i32o2i = 794 + dnnl_OIw8i64o2i = 236 + dnnl_OwI8i64o2i = 791 + dnnl_OIw8i8o = 72 + dnnl_OwI8i8o = 788 + dnnl_OIw8o16i2o = 66 + 
dnnl_IOw8o16i2o = 71 + dnnl_OIw8o8i = 67 + dnnl_OIw8o4i = 68 + dnnl_Owi16o = 202 + dnnl_OwI16o2i = 203 + dnnl_OwI16o4i = 204 + dnnl_Iwo8i = 722 + dnnl_IwO8i2o = 723 + dnnl_IwO8i4o = 746 + dnnl_Iwo16i = 624 + dnnl_IwO16i2o = 625 + dnnl_IwO16i4o = 626 + dnnl_Iwo24i = 734 + dnnl_IwO24i2o = 735 + dnnl_IwO24i4o = 752 + dnnl_Owi4o = 205 + dnnl_Owi8o = 206 + dnnl_OwI8o2i = 675 + dnnl_OIw8i32o = 682 + dnnl_OwI8i32o = 779 + dnnl_OIw8i24o = 686 + dnnl_OwI8i24o = 782 + dnnl_OIw8i16o = 690 + dnnl_OwI8i16o = 785 + dnnl_OwI8o4i = 716 + dnnl_IOhw16i16o = 225 + dnnl_IOhw16o16i = 224 + dnnl_Ohwi16o = 211 + dnnl_OhwI16o2i = 212 + dnnl_OhwI16o4i = 213 + dnnl_Ihwo8i = 724 + dnnl_IhwO8i2o = 725 + dnnl_IhwO8i4o = 747 + dnnl_Ihwo16i = 627 + dnnl_IhwO16i2o = 628 + dnnl_IhwO16i4o = 629 + dnnl_Ihwo24i = 736 + dnnl_IhwO24i2o = 737 + dnnl_IhwO24i4o = 753 + dnnl_Ohwi24o = 661 + dnnl_Ohwi32o = 214 + dnnl_Ohwi4o = 215 + dnnl_Ohwi8o = 216 + dnnl_OhwI8o2i = 676 + dnnl_OhwI8o4i = 717 + dnnl_OIhw16i16o = 79 + dnnl_OhwI16i16o = 777 + dnnl_OIhw16i32o = 247 + dnnl_OhwI16i32o = 774 + dnnl_OIhw16i48o = 760 + dnnl_OhwI16i48o = 771 + dnnl_OIhw16i64o = 248 + dnnl_OhwI16i64o = 768 + dnnl_OIhw16o16i = 75 + dnnl_Oihw16o = 73 + dnnl_OIhw4i8o4i = 698 + dnnl_OhwI4i8o4i = 819 + dnnl_OIhw4i16o4i = 85 + dnnl_OhwI4i16o4i = 816 + dnnl_OIhw4i24o4i = 699 + dnnl_OhwI4i24o4i = 813 + dnnl_OIhw4i32o4i = 249 + dnnl_OhwI4i32o4i = 810 + dnnl_OIhw4i64o4i = 250 + dnnl_OhwI4i64o4i = 807 + dnnl_OIhw16i16o4i = 86 + dnnl_OIhw16i16o2i = 87 + dnnl_OIhw16o16i2o = 274 + dnnl_OIhw4i4o = 88 + dnnl_OIhw4o4i = 89 + dnnl_Oihw4o = 82 + dnnl_OIhw8i8o2i = 708 + dnnl_OhwI8i8o2i = 804 + dnnl_OIhw8i16o2i = 104 + dnnl_OhwI8i16o2i = 801 + dnnl_OIhw8i32o2i = 251 + dnnl_OhwI8i32o2i = 795 + dnnl_OIhw8i24o2i = 704 + dnnl_OhwI8i24o2i = 798 + dnnl_OIhw8i64o2i = 252 + dnnl_OhwI8i64o2i = 792 + dnnl_OIhw8i8o = 107 + dnnl_OhwI8i8o = 789 + dnnl_OIhw8o16i2o = 98 + dnnl_OIhw2i8o4i = 99 + dnnl_IOhw8o16i2o = 106 + dnnl_OIhw8o8i = 100 + dnnl_OIhw8o4i = 101 + 
dnnl_Owhi16o = 221 + dnnl_OIhw8i32o = 683 + dnnl_OhwI8i32o = 780 + dnnl_OIhw8i24o = 687 + dnnl_OhwI8i24o = 783 + dnnl_OIhw8i16o = 691 + dnnl_OhwI8i16o = 786 + dnnl_Odhwi16o = 217 + dnnl_OdhwI16o2i = 218 + dnnl_OdhwI16o4i = 273 + dnnl_Idhwo8i = 726 + dnnl_IdhwO8i2o = 727 + dnnl_IdhwO8i4o = 748 + dnnl_Idhwo16i = 630 + dnnl_IdhwO16i2o = 631 + dnnl_IdhwO16i4o = 632 + dnnl_Idhwo24i = 738 + dnnl_IdhwO24i2o = 739 + dnnl_IdhwO24i4o = 754 + dnnl_Odhwi4o = 219 + dnnl_Odhwi8o = 220 + dnnl_OdhwI8o2i = 677 + dnnl_OdhwI8o4i = 718 + dnnl_Odwhi16o = 517 + dnnl_OIdhw16i16o = 122 + dnnl_OdhwI16i16o = 778 + dnnl_OIdhw16i32o = 257 + dnnl_OdhwI16i32o = 775 + dnnl_OIdhw16i48o = 761 + dnnl_OdhwI16i48o = 772 + dnnl_OIdhw16i64o = 258 + dnnl_OdhwI16i64o = 769 + dnnl_OIdhw16o16i = 116 + dnnl_Oidhw16o = 114 + dnnl_OIdhw4i4o = 129 + dnnl_OIdhw4o4i = 130 + dnnl_Oidhw4o = 126 + dnnl_OIdhw8i8o2i = 709 + dnnl_OdhwI8i8o2i = 805 + dnnl_OIdhw8i16o2i = 143 + dnnl_OdhwI8i16o2i = 802 + dnnl_OIdhw8i32o2i = 259 + dnnl_OdhwI8i32o2i = 796 + dnnl_OIdhw8i24o2i = 705 + dnnl_OdhwI8i24o2i = 799 + dnnl_OIdhw8i64o2i = 260 + dnnl_OdhwI8i64o2i = 793 + dnnl_OIdhw8i8o = 147 + dnnl_OdhwI8i8o = 790 + dnnl_OIdhw8o16i2o = 111 + dnnl_IOdhw8o16i2o = 117 + dnnl_OIdhw4i8o4i = 700 + dnnl_OdhwI4i8o4i = 820 + dnnl_OIdhw4i16o4i = 119 + dnnl_OdhwI4i16o4i = 817 + dnnl_OIdhw4i24o4i = 701 + dnnl_OdhwI4i24o4i = 814 + dnnl_OIdhw4i32o4i = 253 + dnnl_OdhwI4i32o4i = 811 + dnnl_OIdhw4i64o4i = 254 + dnnl_OdhwI4i64o4i = 808 + dnnl_OIdhw16i16o4i = 255 + dnnl_OIdhw16i16o2i = 256 + dnnl_OIdhw2i8o4i = 120 + dnnl_OIdhw8o8i = 139 + dnnl_OIdhw8o4i = 140 + dnnl_IOdhw16i16o = 141 + dnnl_OIdhw4o8i8o4i = 153 + dnnl_IOdhw16o16i = 229 + dnnl_OIdhw16o16i2o = 515 + dnnl_OIdhw8i32o = 684 + dnnl_OdhwI8i32o = 781 + dnnl_OIdhw8i24o = 688 + dnnl_OdhwI8i24o = 784 + dnnl_OIdhw8i16o = 692 + dnnl_OdhwI8i16o = 787 + dnnl_Goiw16g = 73 + dnnl_Goiw8g = 74 + dnnl_Goiw4g = 82 + dnnl_gIOw16o16i = 207 + dnnl_gIOw16i16o = 208 + dnnl_gOIw16i16o = 81 + dnnl_gOIw16o16i = 80 + 
dnnl_gOiw16o = 78 + dnnl_gOIw4i16o4i = 92 + dnnl_gOIw2i8o4i = 93 + dnnl_gOIw16i16o4i = 94 + dnnl_gOIw16i16o2i = 95 + dnnl_gOIw16o16i2o = 276 + dnnl_gOIw4i4o = 96 + dnnl_gOIw4o4i = 97 + dnnl_gOiw4o = 84 + dnnl_gOIw8i16o2i = 110 + dnnl_gOIw8i8o = 113 + dnnl_gOIw8o16i2o = 105 + dnnl_gIOw8o16i2o = 112 + dnnl_gOIw8o8i = 108 + dnnl_gOIw8o4i = 109 + dnnl_gOwi16o = 183 + dnnl_gOwI16o2i = 184 + dnnl_gOwI16o4i = 185 + dnnl_gIwo8i = 728 + dnnl_gIwO8i2o = 729 + dnnl_gIwO8i4o = 749 + dnnl_gIwo16i = 633 + dnnl_gIwO16i2o = 634 + dnnl_gIwO16i4o = 635 + dnnl_gIwo24i = 740 + dnnl_gIwO24i2o = 741 + dnnl_gIwO24i4o = 755 + dnnl_gOwi4o = 186 + dnnl_gOwi8o = 187 + dnnl_gOwI8o2i = 678 + dnnl_gOwI8o4i = 719 + dnnl_Goiw32g = 76 + dnnl_gOIw2i4o2i = 90 + dnnl_gOIw2o4i2o = 118 + dnnl_gOIw4i8o2i = 103 + dnnl_gOIw4o8i2o = 91 + dnnl_goIw4i = 533 + dnnl_goIw32i = 378 + dnnl_gIOhw16i16o = 210 + dnnl_gIOhw16o16i = 209 + dnnl_gOhwi16o = 188 + dnnl_gOhwI16o2i = 189 + dnnl_gOhwI16o4i = 190 + dnnl_gIhwo8i = 730 + dnnl_gIhwO8i2o = 731 + dnnl_gIhwO8i4o = 750 + dnnl_gIhwo16i = 636 + dnnl_gIhwO16i2o = 637 + dnnl_gIhwO16i4o = 638 + dnnl_gIhwo24i = 742 + dnnl_gIhwO24i2o = 743 + dnnl_gIhwO24i4o = 756 + dnnl_gOhwi32o = 191 + dnnl_gOhwi24o = 664 + dnnl_gOhwI24o2i = 673 + dnnl_gOhwI24o4i = 714 + dnnl_gOhwi4o = 192 + dnnl_gOhwi8o = 193 + dnnl_gOhwI8o2i = 679 + dnnl_gOhwI8o4i = 720 + dnnl_Goihw16g = 114 + dnnl_gOIhw16i16o = 124 + dnnl_gOIhw16o16i = 123 + dnnl_gOihw16o = 121 + dnnl_gOIhw2i8o4i = 125 + dnnl_gOIhw4i16o4i = 134 + dnnl_gOIhw16i16o4i = 135 + dnnl_gOIhw16i16o2i = 136 + dnnl_gOIhw16o16i2o = 277 + dnnl_gOIhw4i4o = 137 + dnnl_gOIhw4o4i = 131 + dnnl_gOihw4o = 128 + dnnl_Goihw8g = 138 + dnnl_Goihw4g = 126 + dnnl_gOIhw8i16o2i = 161 + dnnl_gOIhw8i8o = 162 + dnnl_gOIhw8o16i2o = 144 + dnnl_gIOhw8o16i2o = 146 + dnnl_gOIhw8o8i = 149 + dnnl_gOIhw8o4i = 150 + dnnl_Goihw32g = 115 + dnnl_gOwhi16o = 201 + dnnl_goIhw4i = 534 + dnnl_goIhw32i = 536 + dnnl_OIw4o8i8o4i = 151 + dnnl_OIhw4o8i8o4i = 152 + dnnl_IOw4i8o8i4o = 154 
+ dnnl_IOhw4i8o8i4o = 155 + dnnl_IOdhw4i8o8i4o = 156 + dnnl_OIhw2o8i8o2i = 157 + dnnl_gOIw4o8i8o4i = 158 + dnnl_gOIhw4o8i8o4i = 159 + dnnl_gOIdhw4o8i8o4i = 179 + dnnl_gIOw4i8o8i4o = 226 + dnnl_gIOhw4i8o8i4o = 227 + dnnl_gIOdhw4i8o8i4o = 228 + dnnl_gOIhw2o8i8o2i = 160 + dnnl_gOIhw2i4o2i = 132 + dnnl_gOIhw2o4i2o = 163 + dnnl_gOIhw4i8o2i = 145 + dnnl_gOIhw4o8i2o = 133 + dnnl_gIOdhw16i16o = 196 + dnnl_gIOdhw16o16i = 230 + dnnl_gOdhwi16o = 194 + dnnl_gOdhwI16o2i = 195 + dnnl_gOdhwI16o4i = 272 + dnnl_gIdhwo8i = 732 + dnnl_gIdhwO8i2o = 733 + dnnl_gIdhwO8i4o = 751 + dnnl_gIdhwo16i = 639 + dnnl_gIdhwO16i2o = 640 + dnnl_gIdhwO16i4o = 641 + dnnl_gIdhwo24i = 744 + dnnl_gIdhwO24i2o = 745 + dnnl_gIdhwO24i4o = 757 + dnnl_gOdhwi4o = 197 + dnnl_gOdhwi8o = 198 + dnnl_gOdhwI8o2i = 680 + dnnl_gOdhwI8o4i = 721 + dnnl_gOdwhi16o = 518 + dnnl_gOIdhw16i16o = 166 + dnnl_gOIdhw4i16o4i = 167 + dnnl_gOIdhw16i16o4i = 261 + dnnl_gOIdhw2i8o4i = 168 + dnnl_gOIdhw16i16o2i = 262 + dnnl_gOIdhw16o16i = 165 + dnnl_gOIdhw16o16i2o = 516 + dnnl_gOidhw16o = 164 + dnnl_gOIdhw4i4o = 172 + dnnl_gOIdhw4o4i = 173 + dnnl_gOidhw4o = 171 + dnnl_gOIdhw8i16o2i = 178 + dnnl_gOIdhw8i8o = 182 + dnnl_gOIdhw8o16i2o = 180 + dnnl_gIOdhw8o16i2o = 181 + dnnl_gOIdhw8o8i = 176 + dnnl_gOIdhw8o4i = 177 + dnnl_Goidhw16g = 199 + dnnl_Goidhw32g = 200 + dnnl_gOIdhw2i4o2i = 174 + dnnl_gOIdhw4i8o2i = 169 + dnnl_gOIdhw2o4i2o = 170 + dnnl_gOIdhw4o8i2o = 175 + dnnl_goIdhw4i = 535 + dnnl_goIdhw32i = 537 + dnnl_Owi24o = 660 + dnnl_OwI24o2i = 669 + dnnl_OwI24o4i = 710 + dnnl_Owi32o = 278 + dnnl_OwI32o2i = 279 + dnnl_OwI32o4i = 280 + dnnl_Owi48o = 281 + dnnl_OwI48o2i = 282 + dnnl_OwI48o4i = 283 + dnnl_Owi64o = 284 + dnnl_OwI64o2i = 285 + dnnl_OwI64o4i = 286 + dnnl_Iwo32i = 570 + dnnl_IwO32i2o = 571 + dnnl_IwO32i4o = 572 + dnnl_Iwo48i = 573 + dnnl_IwO48i2o = 574 + dnnl_IwO48i4o = 575 + dnnl_Iwo64i = 576 + dnnl_IwO64i2o = 577 + dnnl_IwO64i4o = 578 + dnnl_wIo2i = 287 + dnnl_wIo4i = 288 + dnnl_gOwi24o = 663 + dnnl_gOwI24o2i = 672 + 
dnnl_gOwI24o4i = 713 + dnnl_gOwi32o = 289 + dnnl_gOwI32o2i = 290 + dnnl_gOwI32o4i = 291 + dnnl_gOwi48o = 292 + dnnl_gOwI48o2i = 293 + dnnl_gOwI48o4i = 294 + dnnl_gOwi64o = 295 + dnnl_gOwI64o2i = 296 + dnnl_gOwI64o4i = 297 + dnnl_gIwo32i = 579 + dnnl_gIwO32i2o = 580 + dnnl_gIwO32i4o = 581 + dnnl_gIwo48i = 582 + dnnl_gIwO48i2o = 583 + dnnl_gIwO48i4o = 584 + dnnl_gIwo64i = 585 + dnnl_gIwO64i2o = 586 + dnnl_gIwO64i4o = 587 + dnnl_gwio = 24 + dnnl_gwIo2i = 298 + dnnl_gwIo4i = 299 + dnnl_OhwI24o = 661 + dnnl_OhwI24o2i = 670 + dnnl_OhwI24o4i = 711 + dnnl_OhwI32o = 214 + dnnl_OhwI32o2i = 300 + dnnl_OhwI32o4i = 301 + dnnl_Ohwi48o = 302 + dnnl_OhwI48o2i = 303 + dnnl_OhwI48o4i = 304 + dnnl_Ohwi64o = 305 + dnnl_OhwI64o2i = 306 + dnnl_OhwI64o4i = 307 + dnnl_Ihwo32i = 642 + dnnl_IhwO32i2o = 643 + dnnl_IhwO32i4o = 644 + dnnl_Ihwo48i = 645 + dnnl_IhwO48i2o = 646 + dnnl_IhwO48i4o = 647 + dnnl_Ihwo64i = 648 + dnnl_IhwO64i2o = 649 + dnnl_IhwO64i4o = 650 + dnnl_hwIo2i = 308 + dnnl_hwIo4i = 309 + dnnl_gOhwI24o = 664 + dnnl_gOhwI32o = 191 + dnnl_gOhwI32o2i = 310 + dnnl_gOhwI32o4i = 311 + dnnl_gOhwi48o = 312 + dnnl_gOhwI48o2i = 313 + dnnl_gOhwI48o4i = 314 + dnnl_gOhwi64o = 315 + dnnl_gOhwI64o2i = 316 + dnnl_gOhwI64o4i = 317 + dnnl_gIhwo32i = 651 + dnnl_gIhwO32i2o = 652 + dnnl_gIhwO32i4o = 653 + dnnl_gIhwo48i = 654 + dnnl_gIhwO48i2o = 655 + dnnl_gIhwO48i4o = 656 + dnnl_gIhwo64i = 657 + dnnl_gIhwO64i2o = 658 + dnnl_gIhwO64i4o = 659 + dnnl_ghwio = 34 + dnnl_ghwIo2i = 318 + dnnl_ghwIo4i = 319 + dnnl_Odhwi24o = 662 + dnnl_OdhwI24o2i = 671 + dnnl_OdhwI24o4i = 712 + dnnl_Odhwi32o = 320 + dnnl_OdhwI32o2i = 321 + dnnl_OdhwI32o4i = 322 + dnnl_Odhwi48o = 323 + dnnl_OdhwI48o2i = 324 + dnnl_OdhwI48o4i = 325 + dnnl_Odhwi64o = 326 + dnnl_OdhwI64o2i = 327 + dnnl_OdhwI64o4i = 328 + dnnl_Idhwo32i = 561 + dnnl_IdhwO32i2o = 562 + dnnl_IdhwO32i4o = 563 + dnnl_Idhwo48i = 564 + dnnl_IdhwO48i2o = 565 + dnnl_IdhwO48i4o = 566 + dnnl_Idhwo64i = 567 + dnnl_IdhwO64i2o = 568 + dnnl_IdhwO64i4o = 569 + dnnl_dhwIo2i = 
329 + dnnl_dhwIo4i = 330 + dnnl_gOdhwi24o = 665 + dnnl_gOdhwI24o2i = 674 + dnnl_gOdhwI24o4i = 715 + dnnl_gOdhwi32o = 331 + dnnl_gOdhwI32o2i = 332 + dnnl_gOdhwI32o4i = 333 + dnnl_gOdhwi48o = 334 + dnnl_gOdhwI48o2i = 335 + dnnl_gOdhwI48o4i = 336 + dnnl_gOdhwi64o = 337 + dnnl_gOdhwI64o2i = 338 + dnnl_gOdhwI64o4i = 339 + dnnl_gIdhwo32i = 552 + dnnl_gIdhwO32i2o = 553 + dnnl_gIdhwO32i4o = 554 + dnnl_gIdhwo48i = 555 + dnnl_gIdhwO48i2o = 556 + dnnl_gIdhwO48i4o = 557 + dnnl_gIdhwo64i = 558 + dnnl_gIdhwO64i2o = 559 + dnnl_gIdhwO64i4o = 560 + dnnl_gdhwio = 44 + dnnl_gdhwIo2i = 340 + dnnl_gdhwIo4i = 341 + dnnl_OI16i32o4i = 342 + dnnl_OI16i48o4i = 343 + dnnl_OI16i64o4i = 344 + dnnl_OI16i16o2i = 345 + dnnl_OI16i32o2i = 346 + dnnl_OI16i48o2i = 347 + dnnl_OI16i64o2i = 348 + dnnl_OIw16i32o4i = 349 + dnnl_OIw16i48o4i = 350 + dnnl_OIw16i64o4i = 351 + dnnl_OIw16i32o2i = 352 + dnnl_OIw16i48o2i = 353 + dnnl_OIw16i64o2i = 354 + dnnl_OIhw16i32o4i = 355 + dnnl_OIhw16i48o4i = 356 + dnnl_OIhw16i64o4i = 357 + dnnl_OIhw16i32o2i = 358 + dnnl_OIhw16i48o2i = 359 + dnnl_OIhw16i64o2i = 360 + dnnl_OIdhw16i32o4i = 361 + dnnl_OIdhw16i48o4i = 362 + dnnl_OIdhw16i64o4i = 363 + dnnl_OIdhw16i32o2i = 364 + dnnl_OIdhw16i48o2i = 365 + dnnl_OIdhw16i64o2i = 366 + dnnl_OwI16i16o2i = 393 + dnnl_OwI16i16o4i = 394 + dnnl_OhwI16i16o2i = 395 + dnnl_OhwI16i16o4i = 396 + dnnl_OdhwI16i16o2i = 397 + dnnl_OdhwI16i16o4i = 399 + dnnl_IwO16o16i2o = 588 + dnnl_IwO16o16i4o = 589 + dnnl_IhwO16o16i2o = 590 + dnnl_IhwO16o16i4o = 591 + dnnl_IdhwO16o16i2o = 592 + dnnl_IdhwO16o16i4o = 593 + dnnl_gOwI16i16o2i = 406 + dnnl_gOwI16i16o4i = 407 + dnnl_gOhwI16i16o2i = 390 + dnnl_gOhwI16i16o4i = 391 + dnnl_gOdhwI16i16o2i = 392 + dnnl_gOdhwI16i16o4i = 398 + dnnl_gIwO16o16i2o = 594 + dnnl_gIwO16o16i4o = 595 + dnnl_gIhwO16o16i2o = 596 + dnnl_gIhwO16o16i4o = 597 + dnnl_gIdhwO16o16i2o = 598 + dnnl_gIdhwO16o16i4o = 599 + dnnl_OwI16i32o2i = 400 + dnnl_OwI16i32o4i = 401 + dnnl_OwI16i48o2i = 402 + dnnl_OwI16i48o4i = 403 + dnnl_OwI16i64o2i = 404 + 
dnnl_OwI16i64o4i = 405 + dnnl_IwO16o32i2o = 600 + dnnl_IwO16o32i4o = 601 + dnnl_IwO16o48i2o = 602 + dnnl_IwO16o48i4o = 603 + dnnl_IwO16o64i2o = 604 + dnnl_IwO16o64i4o = 605 + dnnl_gOwI16i32o2i = 408 + dnnl_gOwI16i32o4i = 409 + dnnl_gOwI16i48o2i = 410 + dnnl_gOwI16i48o4i = 411 + dnnl_gOwI16i64o2i = 412 + dnnl_gOwI16i64o4i = 413 + dnnl_gIwO16o32i2o = 606 + dnnl_gIwO16o32i4o = 607 + dnnl_gIwO16o48i2o = 608 + dnnl_gIwO16o48i4o = 609 + dnnl_gIwO16o64i2o = 610 + dnnl_gIwO16o64i4o = 611 + dnnl_OhwI16i32o2i = 414 + dnnl_OhwI16i32o4i = 415 + dnnl_OhwI16i48o2i = 416 + dnnl_OhwI16i48o4i = 417 + dnnl_OhwI16i64o2i = 418 + dnnl_OhwI16i64o4i = 419 + dnnl_IhwO16o32i2o = 612 + dnnl_IhwO16o32i4o = 613 + dnnl_IhwO16o48i2o = 614 + dnnl_IhwO16o48i4o = 615 + dnnl_IhwO16o64i2o = 616 + dnnl_IhwO16o64i4o = 617 + dnnl_gOhwI16i32o2i = 420 + dnnl_gOhwI16i32o4i = 421 + dnnl_gOhwI16i48o2i = 422 + dnnl_gOhwI16i48o4i = 423 + dnnl_gOhwI16i64o2i = 424 + dnnl_gOhwI16i64o4i = 425 + dnnl_gIhwO16o32i2o = 618 + dnnl_gIhwO16o32i4o = 619 + dnnl_gIhwO16o48i2o = 620 + dnnl_gIhwO16o48i4o = 621 + dnnl_gIhwO16o64i2o = 622 + dnnl_gIhwO16o64i4o = 623 + dnnl_OdhwI16i32o2i = 426 + dnnl_OdhwI16i32o4i = 427 + dnnl_OdhwI16i48o2i = 428 + dnnl_OdhwI16i48o4i = 429 + dnnl_OdhwI16i64o2i = 430 + dnnl_OdhwI16i64o4i = 431 + dnnl_IdhwO16o32i2o = 546 + dnnl_IdhwO16o32i4o = 547 + dnnl_IdhwO16o48i2o = 548 + dnnl_IdhwO16o48i4o = 549 + dnnl_IdhwO16o64i2o = 550 + dnnl_IdhwO16o64i4o = 551 + dnnl_gOdhwI16i32o2i = 432 + dnnl_gOdhwI16i32o4i = 433 + dnnl_gOdhwI16i48o2i = 434 + dnnl_gOdhwI16i48o4i = 435 + dnnl_gOdhwI16i64o2i = 436 + dnnl_gOdhwI16i64o4i = 437 + dnnl_gIdhwO16o32i2o = 540 + dnnl_gIdhwO16o32i4o = 541 + dnnl_gIdhwO16o48i2o = 542 + dnnl_gIdhwO16o48i4o = 543 + dnnl_gIdhwO16o64i2o = 544 + dnnl_gIdhwO16o64i4o = 545 + dnnl_hwioG16g = 438 + dnnl_hwioG8g = 539 + dnnl_dhwioG16g = 765 + dnnl_dhwioG8g = 766 + dnnl_NCdhw40n16c = 444 + dnnl_NCw40n16c = 372 + dnnl_NChw40n16c = 376 + dnnl_NCw40n32c = 373 + dnnl_NChw40n32c = 377 + 
dnnl_NCdhw40n32c = 445 + dnnl_OIdhw4o8i8o2i = 447 + dnnl_OIhw4o8i8o2i = 448 + dnnl_OIw4o8i8o2i = 449 + dnnl_gOIdhw4o8i8o2i = 450 + dnnl_gOIhw4o8i8o2i = 451 + dnnl_gOIw4o8i8o2i = 452 + dnnl_IOdhw4i8o8i2o = 453 + dnnl_IOhw4i8o8i2o = 454 + dnnl_IOw4i8o8i2o = 455 + dnnl_gIOdhw4i8o8i2o = 456 + dnnl_gIOhw4i8o8i2o = 457 + dnnl_gIOw4i8o8i2o = 458 + dnnl_NCw2c32n8c = 474 + dnnl_NChw2c32n8c = 478 + dnnl_NCdhw2c32n8c = 483 + dnnl_OIw2i8o16i4o = 469 + dnnl_OIhw2i8o16i4o = 470 + dnnl_OIdhw2i8o16i4o = 471 + dnnl_OIw2o8i16o4i = 472 + dnnl_OIw2o8i16o2i = 473 + dnnl_IOw2i8o16i4o = 490 + dnnl_IOw2i8o16i2o = 493 + dnnl_OIhw2o8i16o4i = 475 + dnnl_OIhw2o8i16o2i = 476 + dnnl_IOhw2i8o16i4o = 489 + dnnl_IOhw2i8o16i2o = 492 + dnnl_OIdhw2o8i16o4i = 480 + dnnl_OIdhw2o8i16o2i = 481 + dnnl_IOdhw2i8o16i4o = 488 + dnnl_IOdhw2i8o16i2o = 491 + dnnl_gOIw2o8i16o2i = 485 + dnnl_gIOw2i8o16i2o = 477 + dnnl_gIOhw2i8o16i2o = 494 + dnnl_gIOdhw2i8o16i2o = 495 + dnnl_gOIhw2o8i16o2i = 486 + dnnl_gOIdhw2o8i16o2i = 487 + dnnl_gOIw2o8i16o4i = 497 + dnnl_gOIhw2o8i16o4i = 498 +end + +""" + dnnl_prop_kind_t + +Kinds of propagation. + +| Enumerator | Note | +| :------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_prop\\_kind\\_undef | Undefined propagation type. | +| dnnl\\_forward\\_training | Forward data propagation (training mode). In this mode primitives perform computations necessary for subsequent backward propagation. | +| dnnl\\_forward\\_inference | Forward data propagation (inference mode). In this mode primitives perform only computations that are necessary for inference and omit computations that are necessary only for backward propagation. | +| dnnl\\_forward | Forward data propagation (alias for `dnnl_forward_training`). | +| dnnl\\_backward | Backward propagation (with respect to all parameters). 
| +| dnnl\\_backward\\_data | Backward data propagation. | +| dnnl\\_backward\\_weights | Backward weights propagation. | +| dnnl\\_backward\\_bias | Backward bias propagation. | +""" +@cenum dnnl_prop_kind_t::UInt32 begin + dnnl_prop_kind_undef = 0 + dnnl_forward_training = 64 + dnnl_forward_inference = 96 + dnnl_forward = 64 + dnnl_backward = 128 + dnnl_backward_data = 160 + dnnl_backward_weights = 192 + dnnl_backward_bias = 193 +end + +""" + dnnl_primitive_kind_t + +Kinds of primitives. Used to implement a way to extend the library with new primitives without changing the ABI. + +| Enumerator | Note | +| :---------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_undefined\\_primitive | Undefined primitive | +| dnnl\\_reorder | A reorder primitive. | +| dnnl\\_shuffle | A shuffle primitive. | +| dnnl\\_concat | A (out-of-place) concat primitive. | +| dnnl\\_sum | A sum primitive. | +| dnnl\\_convolution | A convolution primitive. | +| dnnl\\_deconvolution | A deconvolution primitive. | +| dnnl\\_eltwise | An element-wise primitive. | +| dnnl\\_lrn | An LRN primitive. | +| dnnl\\_batch\\_normalization | A batch normalization primitive. | +| dnnl\\_inner\\_product | An inner product primitive. | +| dnnl\\_rnn | A rnn primitive. | +| dnnl\\_gemm | A matrix multiplication primitive (internal). | +| dnnl\\_binary | A binary primitive. | +| dnnl\\_matmul | A matrix multiplication primitive. | +| dnnl\\_resampling | A resampling primitive. | +| dnnl\\_pooling | A pooling primitive. | +| dnnl\\_reduction | A reduction primitive. | +| dnnl\\_prelu | A PReLU primitive. | +| dnnl\\_softmax | A softmax primitive. | +| dnnl\\_layer\\_normalization | A layer normalization primitive. | +| dnnl\\_group\\_normalization | A group normalization primitive. 
| +| dnnl\\_primitive\\_kind\\_max | Parameter to allow internal only primitives without undefined behavior. This parameter is chosen to be valid for so long as sizeof(int) >= 2. | +""" +@cenum dnnl_primitive_kind_t::UInt32 begin + dnnl_undefined_primitive = 0 + dnnl_reorder = 1 + dnnl_shuffle = 2 + dnnl_concat = 3 + dnnl_sum = 4 + dnnl_convolution = 5 + dnnl_deconvolution = 6 + dnnl_eltwise = 7 + dnnl_lrn = 8 + dnnl_batch_normalization = 9 + dnnl_inner_product = 10 + dnnl_rnn = 11 + dnnl_gemm = 12 + dnnl_binary = 13 + dnnl_matmul = 14 + dnnl_resampling = 15 + dnnl_pooling = 16 + dnnl_reduction = 17 + dnnl_prelu = 18 + dnnl_softmax = 19 + dnnl_layer_normalization = 20 + dnnl_group_normalization = 21 + dnnl_primitive_kind_max = 32767 +end + +""" + dnnl_alg_kind_t + +Kinds of algorithms. + +| Enumerator | Note | +| :------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_convolution\\_direct | Direct convolution | +| dnnl\\_convolution\\_winograd | Winograd convolution | +| dnnl\\_convolution\\_auto | Convolution algorithm(either direct or Winograd) is chosen just in time | +| dnnl\\_deconvolution\\_direct | Direct deconvolution | +| dnnl\\_deconvolution\\_winograd | Winograd deconvolution | +| dnnl\\_eltwise\\_relu | Eltwise: ReLU | +| dnnl\\_eltwise\\_tanh | Eltwise: hyperbolic tangent non-linearity (tanh) | +| dnnl\\_eltwise\\_elu | Eltwise: exponential linear unit (elu) | +| dnnl\\_eltwise\\_square | Eltwise: square | +| dnnl\\_eltwise\\_abs | Eltwise: abs | +| dnnl\\_eltwise\\_sqrt | Eltwise: square root | +| dnnl\\_eltwise\\_linear | Eltwise: linear | +| dnnl\\_eltwise\\_soft\\_relu | Eltwise: soft\\_relu | +| dnnl\\_eltwise\\_hardsigmoid | Eltwise: 
hardsigmoid | +| dnnl\\_eltwise\\_logistic | Eltwise: logistic | +| dnnl\\_eltwise\\_exp | Eltwise: exponent | +| dnnl\\_eltwise\\_gelu\\_tanh | Eltwise: gelu !!! note Tanh approximation formula is used to approximate the cumulative distribution function of a Gaussian here | +| dnnl\\_eltwise\\_swish | Eltwise: swish | +| dnnl\\_eltwise\\_log | Eltwise: natural logarithm | +| dnnl\\_eltwise\\_clip | Eltwise: clip | +| dnnl\\_eltwise\\_clip\\_v2 | Eltwise: clip version 2 | +| dnnl\\_eltwise\\_pow | Eltwise: pow | +| dnnl\\_eltwise\\_gelu\\_erf | Eltwise: erf-based gelu | +| dnnl\\_eltwise\\_round | Eltwise: round | +| dnnl\\_eltwise\\_mish | Eltwise: mish | +| dnnl\\_eltwise\\_hardswish | Eltwise: hardswish | +| dnnl\\_eltwise\\_relu\\_use\\_dst\\_for\\_bwd | Eltwise: ReLU (dst for backward) | +| dnnl\\_eltwise\\_tanh\\_use\\_dst\\_for\\_bwd | Eltwise: hyperbolic tangent non-linearity (tanh) (dst for backward) | +| dnnl\\_eltwise\\_elu\\_use\\_dst\\_for\\_bwd | Eltwise: exponential linear unit (elu) (dst for backward) | +| dnnl\\_eltwise\\_sqrt\\_use\\_dst\\_for\\_bwd | Eltwise: square root (dst for backward) | +| dnnl\\_eltwise\\_logistic\\_use\\_dst\\_for\\_bwd | Eltwise: logistic (dst for backward) | +| dnnl\\_eltwise\\_exp\\_use\\_dst\\_for\\_bwd | Eltwise: exp (dst for backward) | +| dnnl\\_eltwise\\_clip\\_v2\\_use\\_dst\\_for\\_bwd | Eltwise: clip version 2 (dst for backward) | +| dnnl\\_pooling\\_max | Max pooling | +| dnnl\\_pooling\\_avg\\_include\\_padding | Average pooling include padding | +| dnnl\\_pooling\\_avg\\_exclude\\_padding | Average pooling exclude padding | +| dnnl\\_lrn\\_across\\_channels | Local response normalization (LRN) across multiple channels | +| dnnl\\_lrn\\_within\\_channel | LRN within a single channel | +| dnnl\\_vanilla\\_rnn | RNN cell | +| dnnl\\_vanilla\\_lstm | LSTM cell | +| dnnl\\_vanilla\\_gru | GRU cell | +| dnnl\\_lbr\\_gru | GRU cell with linear before reset Modification of original GRU cell. 
Differs from #dnnl\\_vanilla\\_gru in how the new memory gate is calculated: ```c++ c_t = tanh(W_c*x_t + b_{c_x} + r_t*(U_c*h_{t-1}+b_{c_h})) ``` Primitive expects 4 biases on input: ```c++ [b_{u}, b_{r}, b_{c_x}, b_{c_h}] ``` | +| dnnl\\_vanilla\\_augru | AUGRU cell | +| dnnl\\_lbr\\_augru | AUGRU cell with linear before reset | +| dnnl\\_binary\\_add | Binary add | +| dnnl\\_binary\\_mul | Binary mul | +| dnnl\\_binary\\_max | Binary max | +| dnnl\\_binary\\_min | Binary min | +| dnnl\\_binary\\_div | Binary div | +| dnnl\\_binary\\_sub | Binary sub | +| dnnl\\_binary\\_ge | Binary greater or equal | +| dnnl\\_binary\\_gt | Binary greater than | +| dnnl\\_binary\\_le | Binary less or equal | +| dnnl\\_binary\\_lt | Binary less than | +| dnnl\\_binary\\_eq | Binary equal | +| dnnl\\_binary\\_ne | Binary not equal | +| dnnl\\_resampling\\_nearest | Nearest Neighbor Resampling Method | +| dnnl\\_resampling\\_linear | Linear Resampling Method | +| dnnl\\_reduction\\_max | Reduction using max | +| dnnl\\_reduction\\_min | Reduction using min | +| dnnl\\_reduction\\_sum | Reduction using sum | +| dnnl\\_reduction\\_mul | Reduction using mul | +| dnnl\\_reduction\\_mean | Reduction using mean | +| dnnl\\_reduction\\_norm\\_lp\\_max | Reduction using lp norm | +| dnnl\\_reduction\\_norm\\_lp\\_sum | | +| dnnl\\_reduction\\_norm\\_lp\\_power\\_p\\_max | Reduction using lp norm without final pth-root | +| dnnl\\_reduction\\_norm\\_lp\\_power\\_p\\_sum | | +| dnnl\\_softmax\\_accurate | Softmax | +| dnnl\\_softmax\\_log | Logsoftmax | +""" +@cenum dnnl_alg_kind_t::UInt32 begin + dnnl_alg_kind_undef = 0 + dnnl_convolution_direct = 1 + dnnl_convolution_winograd = 2 + dnnl_convolution_auto = 3 + dnnl_deconvolution_direct = 10 + dnnl_deconvolution_winograd = 11 + dnnl_eltwise_relu = 32 + dnnl_eltwise_tanh = 33 + dnnl_eltwise_elu = 34 + dnnl_eltwise_square = 35 + dnnl_eltwise_abs = 36 + dnnl_eltwise_sqrt = 37 + dnnl_eltwise_linear = 38 + dnnl_eltwise_soft_relu = 39 + 
dnnl_eltwise_hardsigmoid = 40 + dnnl_eltwise_logistic = 41 + dnnl_eltwise_exp = 42 + dnnl_eltwise_gelu_tanh = 43 + dnnl_eltwise_swish = 44 + dnnl_eltwise_log = 45 + dnnl_eltwise_clip = 46 + dnnl_eltwise_clip_v2 = 47 + dnnl_eltwise_pow = 48 + dnnl_eltwise_gelu_erf = 49 + dnnl_eltwise_round = 50 + dnnl_eltwise_mish = 51 + dnnl_eltwise_hardswish = 52 + dnnl_eltwise_relu_use_dst_for_bwd = 256 + dnnl_eltwise_tanh_use_dst_for_bwd = 257 + dnnl_eltwise_elu_use_dst_for_bwd = 258 + dnnl_eltwise_sqrt_use_dst_for_bwd = 259 + dnnl_eltwise_logistic_use_dst_for_bwd = 260 + dnnl_eltwise_exp_use_dst_for_bwd = 261 + dnnl_eltwise_clip_v2_use_dst_for_bwd = 262 + dnnl_pooling_max = 511 + dnnl_pooling_avg_include_padding = 767 + dnnl_pooling_avg_exclude_padding = 1023 + dnnl_lrn_across_channels = 2815 + dnnl_lrn_within_channel = 3071 + dnnl_vanilla_rnn = 8191 + dnnl_vanilla_lstm = 12287 + dnnl_vanilla_gru = 16383 + dnnl_lbr_gru = 20479 + dnnl_vanilla_augru = 24575 + dnnl_lbr_augru = 28671 + dnnl_binary_add = 131056 + dnnl_binary_mul = 131057 + dnnl_binary_max = 131058 + dnnl_binary_min = 131059 + dnnl_binary_div = 131060 + dnnl_binary_sub = 131061 + dnnl_binary_ge = 131062 + dnnl_binary_gt = 131063 + dnnl_binary_le = 131064 + dnnl_binary_lt = 131065 + dnnl_binary_eq = 131066 + dnnl_binary_ne = 131067 + dnnl_resampling_nearest = 196592 + dnnl_resampling_linear = 196593 + dnnl_reduction_max = 196594 + dnnl_reduction_min = 196595 + dnnl_reduction_sum = 196596 + dnnl_reduction_mul = 196597 + dnnl_reduction_mean = 196598 + dnnl_reduction_norm_lp_max = 196599 + dnnl_reduction_norm_lp_sum = 196600 + dnnl_reduction_norm_lp_power_p_max = 196601 + dnnl_reduction_norm_lp_power_p_sum = 196602 + dnnl_softmax_accurate = 196608 + dnnl_softmax_log = 196609 +end + +""" + dnnl_normalization_flags_t + +Flags for normalization primitives. 
+ +| Enumerator | Note | +| :---------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_normalization\\_flags\\_none | Use no normalization flags If specified - on forward training propagation mean and variance are computed and stored as output - on backward propagation compute full derivative wrt data - on backward propagation prop\\_kind == #dnnl\\_backward\\_data has the same behavior as prop\\_kind == #dnnl\\_backward | +| dnnl\\_use\\_global\\_stats | Use global statistics If specified - on forward propagation use mean and variance provided by user (input) - on backward propagation reduces the amount of computations, since mean and variance are considered as constants If not specified: - on forward propagation mean and variance are computed and stored as output - on backward propagation compute full derivative wrt data | +| dnnl\\_use\\_scale | Use scale parameter If specified: - on forward propagation use scale for the normalization results - on backward propagation (for prop\\_kind == #dnnl\\_backward) compute diff wrt scale (hence one extra output used) | +| dnnl\\_use\\_shift | Use shift parameter If specified: - on forward propagation use shift (aka bias) for the normalization results - on backward propagation (for prop\\_kind == #dnnl\\_backward) compute diff wrt shift (hence one extra output used) | +| dnnl\\_fuse\\_norm\\_relu | Fuse with ReLU The flag implies negative slope being 0. On training this is the only configuration supported. 
For inference, to use non-zero negative slope consider using dev_guide_attributes_post_ops. If specified: - on inference this option behaves the same as if the primitive were fused with ReLU using post ops API with zero negative slope. - on training primitive requires workspace (required to be able to perform backward pass) | +| dnnl\\_fuse\\_norm\\_add\\_relu | Fuse with Add and then fuse with ReLU If specified: - on forward propagation apply element-wise binary Add operation to to the normalization results with an additional input tensor and then apply ReLU with negative slope being 0. - on training primitive requires workspace (required to be able to perform backward pass). - on backward propagation save the result of backward ReLU operation with input tensor and workspace from forward pass to extra output tensor and then perform backward normalization. | +""" +@cenum dnnl_normalization_flags_t::UInt32 begin + dnnl_normalization_flags_none = 0 + dnnl_use_global_stats = 1 + dnnl_use_scale = 2 + dnnl_use_shift = 4 + dnnl_fuse_norm_relu = 8 + dnnl_fuse_norm_add_relu = 16 +end + +""" + __JL_Ctag_1 + +` DO_NOT_DOCUMENT_THIS` + +Hex representation for a **special** quiet NAN (!= NAN from math.h) +""" +struct __JL_Ctag_1 + data::NTuple{4, UInt8} +end + +function Base.getproperty(x::Ptr{__JL_Ctag_1}, f::Symbol) + f === :u && return Ptr{Cuint}(x + 0) + f === :f && return Ptr{Cfloat}(x + 0) + return getfield(x, f) +end + +function Base.getproperty(x::__JL_Ctag_1, f::Symbol) + r = Ref{__JL_Ctag_1}(x) + ptr = Base.unsafe_convert(Ptr{__JL_Ctag_1}, r) + fptr = getproperty(ptr, f) + GC.@preserve r unsafe_load(fptr) +end + +function Base.setproperty!(x::Ptr{__JL_Ctag_1}, f::Symbol, v) + unsafe_store!(getproperty(x, f), v) +end + +""" +` dnnl_memory_desc` + +An opaque structure to describe a memory descriptor. +""" +mutable struct dnnl_memory_desc end + +""" +A memory descriptor handle. +""" +const dnnl_memory_desc_t = Ptr{dnnl_memory_desc} + +""" +A memory descriptor handle. 
+""" +const const_dnnl_memory_desc_t = Ptr{dnnl_memory_desc} + +""" +` dnnl_memory` + +An opaque structure to describe a memory. +""" +mutable struct dnnl_memory end + +""" +A memory handle. +""" +const dnnl_memory_t = Ptr{dnnl_memory} + +""" +A constant memory handle. +""" +const const_dnnl_memory_t = Ptr{dnnl_memory} + +""" + dnnl_rnn_flags_t + +Flags for RNN cell. + +| Enumerator | Note | +| :---------------------------------------------- | :------------------------------------------------------------- | +| dnnl\\_rnn\\_flags\\_undef | Undefined RNN flags | +| dnnl\\_rnn\\_flags\\_diff\\_weights\\_overwrite | Do not add weights gradient to existing diff\\_weights memory | +""" +@cenum dnnl_rnn_flags_t::UInt32 begin + dnnl_rnn_flags_undef = 0 + dnnl_rnn_flags_diff_weights_overwrite = 1 +end + +""" + dnnl_rnn_direction_t + +A direction of RNN primitive execution. + +| Enumerator | Note | +| :--------------------------------- | :--------------------------------------------------------------------------- | +| dnnl\\_rnn\\_direction\\_undef | Undefined RNN direction. | +| dnnl\\_unidirectional\\_left2right | Unidirectional execution of RNN primitive from left to right. | +| dnnl\\_unidirectional\\_right2left | Unidirectional execution of RNN primitive from right to left. | +| dnnl\\_bidirectional\\_concat | Bidirectional execution of RNN primitive with concatenation of the results. | +| dnnl\\_bidirectional\\_sum | Bidirectional execution of RNN primitive with summation of the results. | +""" +@cenum dnnl_rnn_direction_t::UInt32 begin + dnnl_rnn_direction_undef = 0 + dnnl_unidirectional_left2right = 1 + dnnl_unidirectional_right2left = 2 + dnnl_bidirectional_concat = 3 + dnnl_bidirectional_sum = 4 +end + +""" +` dnnl_primitive_desc` + +An opaque structure to describe a primitive descriptor. +""" +mutable struct dnnl_primitive_desc end + +""" +A primitive descriptor handle. 
+""" +const dnnl_primitive_desc_t = Ptr{dnnl_primitive_desc} + +""" +A constant primitive descriptor handle. +""" +const const_dnnl_primitive_desc_t = Ptr{dnnl_primitive_desc} + +""" + dnnl_scratchpad_mode_t + +Scratchpad mode + +| Enumerator | Note | +| :--------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_scratchpad\\_mode\\_library | The library manages the scratchpad allocation according to the policy specified by the `DNNL_ENABLE_CONCURRENT_EXEC` [build option](dev_guide_build_options) (default). When `DNNL\\_ENABLE\\_CONCURRENT\\_EXEC=OFF` (default), the library scratchpad is common to all primitives to reduce the memory footprint. This configuration comes with limited thread-safety properties, namely primitives can be created and executed in parallel but cannot migrate between threads (in other words, each primitive should be executed in the same thread it was created in). When `DNNL\\_ENABLE\\_CONCURRENT\\_EXEC=ON`, the library scratchpad is private to each primitive. 
The memory footprint is larger than when using `DNNL\\_ENABLE\\_CONCURRENT\\_EXEC=OFF` but different primitives can be created and run concurrently (the same primitive cannot be run concurrently from two different threads though). | +| dnnl\\_scratchpad\\_mode\\_user | The user manages the scratchpad allocation by querying and providing the scratchpad memory to primitives. This mode is thread-safe as long as the scratchpad buffers are not used concurrently by two primitive executions. | +""" +@cenum dnnl_scratchpad_mode_t::UInt32 begin + dnnl_scratchpad_mode_library = 0 + dnnl_scratchpad_mode_user = 1 +end + +""" +` dnnl_primitive_attr` + +An opaque structure for primitive descriptor attributes. + +Attributes may contain: - output scales (to scale the result prior to storing it to the memory) +""" +mutable struct dnnl_primitive_attr end + +""" +A primitive descriptor attributes handle that controls primitive behavior. +""" +const dnnl_primitive_attr_t = Ptr{dnnl_primitive_attr} + +""" +A constant primitive descriptor attributes handle. +""" +const const_dnnl_primitive_attr_t = Ptr{dnnl_primitive_attr} + +""" +` dnnl_post_ops` + +An opaque structure for a chain of post operations. + +[`dnnl_post_ops`](@ref) can be used to perform some (trivial) operations like accumulation or eltwise after certain primitives like convolution. + +Post operations might be combined together, making a chain of post operations. For instance one can configure convolution followed by accumulation followed by eltwise. This might be especially beneficial for residual learning blocks. + +!!! warning + + Of course not all combinations are supported, so the user should handle errors accordingly. + +Supported post operations: - accumulation (base primitive: convolution) - eltwise (base primitive: convolution) +""" +mutable struct dnnl_post_ops end + +""" +A post operation chain handle. +""" +const dnnl_post_ops_t = Ptr{dnnl_post_ops} + +""" +A constant post operation chain handle. 
+""" +const const_dnnl_post_ops_t = Ptr{dnnl_post_ops} + +""" +` dnnl_primitive` + +An opaque structure to describe a primitive. +""" +mutable struct dnnl_primitive end + +""" +A primitive handle. +""" +const dnnl_primitive_t = Ptr{dnnl_primitive} + +""" +A constant primitive handle. +""" +const const_dnnl_primitive_t = Ptr{dnnl_primitive} + +""" + dnnl_exec_arg_t + +A structure that contains an index and a memory object, and is used to pass arguments to [`dnnl_primitive_execute`](@ref)(). + +| Field | Note | +| :----- | :---------------------------------------------- | +| arg | An argument index, e.g. [`DNNL_ARG_SRC`](@ref) | +| memory | Input/output memory | +""" +struct dnnl_exec_arg_t + arg::Cint + memory::dnnl_memory_t +end + +""" + dnnl_query_t + +Primitive descriptor query specification + +For generic function [`dnnl_primitive_desc_query`](@ref)(), the type of result must agree with the queried argument. The correspondence table: + +Query kind | Type of query result --------------------------------|----------------------------- dnnl\\_query\\_*\\_engine | #[`dnnl_engine_t`](@ref) * #dnnl\\_query\\_primitive\\_kind | #[`dnnl_primitive_kind_t`](@ref) * dnnl\\_query\\_*\\_s32 | int * dnnl\\_query\\_*\\_s64 | #[`dnnl_dim_t`](@ref) * (same as int64\\_t *) dnnl\\_query\\_*\\_f32 | float * dnnl\\_query\\_*\\_f64 | double * dnnl\\_query\\_*\\_str | const char ** dnnl\\_query\\_*\\_md | #[`const_dnnl_memory_desc_t`](@ref) * dnnl\\_query\\_*\\_pd | #[`const_dnnl_primitive_desc_t`](@ref) * dnnl\\_query\\_cache\\_blob\\_id | const uint8\\_t ** dnnl\\_query\\_strides | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_dilations | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_padding\\_l | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_padding\\_r | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_flags | unsigned * dnnl\\_query\\_alg\\_kind | #[`dnnl_alg_kind_t`](@ref) * dnnl\\_query\\_factors | const float ** dnnl\\_query\\_cell\\_kind | #[`dnnl_alg_kind_t`](@ref) * 
dnnl\\_query\\_direction | #[`dnnl_rnn_direction_t`](@ref) * dnnl\\_query\\_activation\\_kind | #[`dnnl_alg_kind_t`](@ref) * dnnl\\_query\\_kernel | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_dims | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_data\\_type | #[`dnnl_data_type_t`](@ref) * dnnl\\_query\\_padded\\_dims | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_padded\\_offsets | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_format\\_kind | #[`dnnl_format_kind_t`](@ref) * dnnl\\_query\\_inner\\_blks | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_inner\\_idxs | const #[`dnnl_dims_t`](@ref) ** dnnl\\_query\\_sparse\\_encoding | #dnnl\\_sparse\\_encoding\\_t * + +!!! note + + Rule of thumb: all opaque types and structures are returned by reference. All numbers are returned by value. + +!!! warning + + All returned references point to constant objects and are valid only during the lifetime of the queried primitive descriptor. Returned objects must not be destroyed by the user. If you need to keep the object longer than the lifetime of the queried primitive descriptor, use [`dnnl_primitive_desc_clone`](@ref)() to make a copy. 
+ +| Enumerator | Note | +| :-------------------------------------------- | :--------------------------------------- | +| dnnl\\_query\\_undef | no query | +| dnnl\\_query\\_engine | execution engine | +| dnnl\\_query\\_primitive\\_kind | primitive kind | +| dnnl\\_query\\_num\\_of\\_inputs\\_s32 | number of inputs expected | +| dnnl\\_query\\_num\\_of\\_outputs\\_s32 | number of outputs expected | +| dnnl\\_query\\_time\\_estimate\\_f64 | runtime estimation (seconds) | +| dnnl\\_query\\_memory\\_consumption\\_s64 | memory consumption -- extra | +| dnnl\\_query\\_scratchpad\\_engine | scratchpad engine -- engine to be used | +| dnnl\\_query\\_impl\\_info\\_str | implementation name | +| dnnl\\_query\\_reorder\\_src\\_engine | source engine | +| dnnl\\_query\\_reorder\\_dst\\_engine | destination engine | +| dnnl\\_query\\_prop\\_kind | propagation kind | +| dnnl\\_query\\_cache\\_blob\\_id\\_size\\_s64 | size of cache blob ID in bytes | +| dnnl\\_query\\_cache\\_blob\\_id | cache blob ID (pointer to array) | +| dnnl\\_query\\_strides | strides | +| dnnl\\_query\\_dilations | dilations | +| dnnl\\_query\\_padding\\_l | left padding | +| dnnl\\_query\\_padding\\_r | right padding | +| dnnl\\_query\\_epsilon\\_f32 | epsilon | +| dnnl\\_query\\_flags | flags | +| dnnl\\_query\\_alg\\_kind | algorithm kind | +| dnnl\\_query\\_alpha\\_f32 | alpha | +| dnnl\\_query\\_beta\\_f32 | beta | +| dnnl\\_query\\_axis\\_s32 | axis | +| dnnl\\_query\\_local\\_size\\_s64 | LRN parameter local size | +| dnnl\\_query\\_k\\_f32 | LRN parameter K | +| dnnl\\_query\\_p\\_f32 | Reduction parameter P | +| dnnl\\_query\\_factors | Resampling parameter factors | +| dnnl\\_query\\_cell\\_kind | RNN parameter cell kind | +| dnnl\\_query\\_direction | RNN parameter direction | +| dnnl\\_query\\_activation\\_kind | RNN parameter activation kind | +| dnnl\\_query\\_kernel | Pooling parameter kernel | +| dnnl\\_query\\_group\\_size\\_s64 | Shuffle parameter group size | +| dnnl\\_query\\_some\\_md 
| stub | +| dnnl\\_query\\_src\\_md | source memory desc | +| dnnl\\_query\\_diff\\_src\\_md | source gradient memory desc | +| dnnl\\_query\\_weights\\_md | weights memory descriptor desc | +| dnnl\\_query\\_diff\\_weights\\_md | weights grad. memory desc | +| dnnl\\_query\\_dst\\_md | destination memory desc | +| dnnl\\_query\\_diff\\_dst\\_md | destination grad. memory desc | +| dnnl\\_query\\_workspace\\_md | workspace memory desc | +| dnnl\\_query\\_scratchpad\\_md | scratchpad memory desc | +| dnnl\\_query\\_exec\\_arg\\_md | memory desc of an execute argument | +| dnnl\\_query\\_ndims\\_s32 | number of dimensions | +| dnnl\\_query\\_dims | vector of dimensions | +| dnnl\\_query\\_data\\_type | data type | +| dnnl\\_query\\_submemory\\_offset\\_s64 | submemory offset | +| dnnl\\_query\\_padded\\_dims | vector of padded dimensions | +| dnnl\\_query\\_padded\\_offsets | vector of padded offsets | +| dnnl\\_query\\_format\\_kind | format kind | +| dnnl\\_query\\_inner\\_nblks\\_s32 | number of innermost blocks | +| dnnl\\_query\\_inner\\_blks | vector of sizes of the innermost blocks | +| dnnl\\_query\\_inner\\_idxs | vector of logical indices of the blocks | +""" +@cenum dnnl_query_t::UInt32 begin + dnnl_query_undef = 0 + dnnl_query_engine = 1 + dnnl_query_primitive_kind = 2 + dnnl_query_num_of_inputs_s32 = 3 + dnnl_query_num_of_outputs_s32 = 4 + dnnl_query_time_estimate_f64 = 5 + dnnl_query_memory_consumption_s64 = 6 + dnnl_query_scratchpad_engine = 7 + dnnl_query_impl_info_str = 8 + dnnl_query_reorder_src_engine = 9 + dnnl_query_reorder_dst_engine = 10 + dnnl_query_prop_kind = 11 + dnnl_query_cache_blob_id_size_s64 = 12 + dnnl_query_cache_blob_id = 13 + dnnl_query_strides = 14 + dnnl_query_dilations = 15 + dnnl_query_padding_l = 16 + dnnl_query_padding_r = 17 + dnnl_query_epsilon_f32 = 18 + dnnl_query_flags = 19 + dnnl_query_alg_kind = 20 + dnnl_query_alpha_f32 = 21 + dnnl_query_beta_f32 = 22 + dnnl_query_axis_s32 = 23 + dnnl_query_local_size_s64 = 24 + 
dnnl_query_k_f32 = 25 + dnnl_query_p_f32 = 26 + dnnl_query_factors = 27 + dnnl_query_cell_kind = 28 + dnnl_query_direction = 29 + dnnl_query_activation_kind = 30 + dnnl_query_kernel = 31 + dnnl_query_group_size_s64 = 32 + dnnl_query_some_md = 128 + dnnl_query_src_md = 129 + dnnl_query_diff_src_md = 130 + dnnl_query_weights_md = 131 + dnnl_query_diff_weights_md = 132 + dnnl_query_dst_md = 133 + dnnl_query_diff_dst_md = 134 + dnnl_query_workspace_md = 135 + dnnl_query_scratchpad_md = 136 + dnnl_query_exec_arg_md = 255 + dnnl_query_ndims_s32 = 256 + dnnl_query_dims = 257 + dnnl_query_data_type = 258 + dnnl_query_submemory_offset_s64 = 259 + dnnl_query_padded_dims = 260 + dnnl_query_padded_offsets = 261 + dnnl_query_format_kind = 262 + dnnl_query_inner_nblks_s32 = 263 + dnnl_query_inner_blks = 264 + dnnl_query_inner_idxs = 265 + dnnl_query_max = 32767 +end + +""" + dnnl_cpu_isa_t + +CPU instruction set flags + +| Enumerator | Note | +| :---------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------- | +| dnnl\\_cpu\\_isa\\_default | Library choice of ISA (excepting those listed as initial support) | +| dnnl\\_cpu\\_isa\\_sse41 | Intel Streaming SIMD Extensions 4.1 (Intel SSE4.1) | +| dnnl\\_cpu\\_isa\\_avx | Intel Advanced Vector Extensions (Intel AVX) | +| dnnl\\_cpu\\_isa\\_avx2 | Intel Advanced Vector Extensions 2 (Intel AVX2) | +| dnnl\\_cpu\\_isa\\_avx2\\_vnni | Intel AVX2 and Intel Deep Learning Boost (Intel DL Boost) support | +| dnnl\\_cpu\\_isa\\_avx2\\_vnni\\_2 | Intel AVX2 and Intel Deep Learning Boost (Intel DL Boost) with 8-bit integer, float16 and bfloat16 support | +| dnnl\\_cpu\\_isa\\_avx512\\_core | Intel AVX-512 subset for Intel Xeon Scalable processor family and Intel Core processor family. 
| +| dnnl\\_cpu\\_isa\\_avx512\\_core\\_vnni | Intel AVX-512 and Intel Deep Learning Boost (Intel DL Boost) support for Intel Xeon Scalable processor family and Intel Core processor family. | +| dnnl\\_cpu\\_isa\\_avx512\\_core\\_bf16 | Intel AVX-512, Intel DL Boost and bfloat16 support for Intel Xeon Scalable processor family and Intel Core processor family. | +| dnnl\\_cpu\\_isa\\_avx10\\_1\\_512 | Intel AVX-512 with float16, Intel DL Boost and bfloat16 support for Intel Xeon Scalable processor family and Intel Core processor family. | +| dnnl\\_cpu\\_isa\\_avx512\\_core\\_fp16 | dnnl_cpu_isa_avx10_1_512 | +| dnnl\\_cpu\\_isa\\_avx10\\_1\\_512\\_amx | Intel AVX-512 with float16, Intel DL Boost and bfloat16 support and Intel AMX with 8-bit integer and bfloat16 support | +| dnnl\\_cpu\\_isa\\_avx512\\_core\\_amx | dnnl_cpu_isa_avx10_1_512_amx | +| dnnl\\_cpu\\_isa\\_avx10\\_1\\_512\\_amx\\_fp16 | Intel AVX-512 with float16, Intel DL Boost and bfloat16 support and Intel AMX with 8-bit integer, bfloat16 and float16 support | +| dnnl\\_cpu\\_isa\\_avx512\\_core\\_amx\\_fp16 | dnnl_cpu_isa_avx10_1_512_amx_fp16 | +""" +@cenum dnnl_cpu_isa_t::UInt32 begin + dnnl_cpu_isa_default = 0 + dnnl_cpu_isa_sse41 = 1 + dnnl_cpu_isa_avx = 3 + dnnl_cpu_isa_avx2 = 7 + dnnl_cpu_isa_avx2_vnni = 15 + dnnl_cpu_isa_avx2_vnni_2 = 31 + dnnl_cpu_isa_avx512_core = 39 + dnnl_cpu_isa_avx512_core_vnni = 103 + dnnl_cpu_isa_avx512_core_bf16 = 231 + dnnl_cpu_isa_avx10_1_512 = 495 + dnnl_cpu_isa_avx512_core_fp16 = 495 + dnnl_cpu_isa_avx10_1_512_amx = 4079 + dnnl_cpu_isa_avx512_core_amx = 4079 + dnnl_cpu_isa_avx10_1_512_amx_fp16 = 8175 + dnnl_cpu_isa_avx512_core_amx_fp16 = 8175 +end + +""" + dnnl_cpu_isa_hints_t + +CPU ISA hints flags + +| Enumerator | Note | +| :------------------------------ | :-------------------------------------------------------- | +| dnnl\\_cpu\\_isa\\_no\\_hints | No hints (use default features) | +| dnnl\\_cpu\\_isa\\_prefer\\_ymm | Prefer to exclusively use Ymm registers for 
computations | +""" +@cenum dnnl_cpu_isa_hints_t::UInt32 begin + dnnl_cpu_isa_no_hints = 0 + dnnl_cpu_isa_prefer_ymm = 1 +end + +""" + dnnl_primitive_desc_next_impl(primitive_desc) + +Changes the primitive descriptor to point to the next available implementation. + +# Arguments +* `primitive_desc`: A primitive descriptor to change. +# Returns +#dnnl\\_last\\_impl\\_reached if no more implementations available, in which case the primitive descriptor itself is kept unchanged. +""" +function dnnl_primitive_desc_next_impl(primitive_desc) + @ccall libdnnl.dnnl_primitive_desc_next_impl(primitive_desc::dnnl_primitive_desc_t)::dnnl_status_t +end + +""" + dnnl_primitive_desc_clone(primitive_desc, existing_primitive_desc) + +Clones a primitive descriptor. The resulting primitive descriptor must be destroyed separately. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `existing_primitive_desc`: Primitive descriptor to clone. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_desc_clone(primitive_desc, existing_primitive_desc) + @ccall libdnnl.dnnl_primitive_desc_clone(primitive_desc::Ptr{dnnl_primitive_desc_t}, existing_primitive_desc::const_dnnl_primitive_desc_t)::dnnl_status_t +end + +""" + dnnl_primitive_desc_get_attr(primitive_desc, attr) + +Returns a constant reference to the attributes of a primitive descriptor. + +!!! warning + + It is an error to destroy the resulting `attr`. + +!!! warning + + The lifetime of an `attr` is the same as that of a `primitive_desc`, so it is an error to use the `attr` once the `primitive_desc` has been destroyed. + +# Arguments +* `primitive_desc`: Primitive descriptor. +* `attr`: Output primitive attributes. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_primitive_desc_get_attr(primitive_desc, attr) + @ccall libdnnl.dnnl_primitive_desc_get_attr(primitive_desc::const_dnnl_primitive_desc_t, attr::Ptr{const_dnnl_primitive_attr_t})::dnnl_status_t +end + +""" + dnnl_primitive_desc_destroy(primitive_desc) + +Destroys a primitive descriptor. + +# Arguments +* `primitive_desc`: Primitive descriptor to destroy. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_desc_destroy(primitive_desc) + @ccall libdnnl.dnnl_primitive_desc_destroy(primitive_desc::dnnl_primitive_desc_t)::dnnl_status_t +end + +""" + dnnl_primitive_desc_query(primitive_desc, what, index, result) + +Queries a primitive descriptor for various pieces of information. + +The most common use case is to query a primitive descriptor, created with source, weights, and destination memory descriptors with format tags set to #dnnl\\_format\\_tag\\_any, for the corresponding memory descriptors (in this case the `what` is set to #dnnl\\_query\\_src\\_md, #dnnl\\_query\\_weights\\_md, and #dnnl\\_query\\_dst\\_md respectively) so that it is possible to create memory objects and reorder primitives if necessary. + +Another typical use case is to query a primitive descriptor for workspace memory descriptor (with `what` set to #dnnl\\_query\\_workspace\\_md). If this query returns #dnnl\\_not\\_required status, then workspace memory is not required. + +!!! note + + When querying for a memory descriptor for a scratchpad, a workspace, or an optional parameter, the query will return a pointer to a zero memory descriptor if the parameter is not needed. 
+ +A few other use cases: - query a primitive descriptor for the implementation information string (#dnnl\\_query\\_impl\\_info\\_str) - query a primitive descriptor for the number of inputs and outputs (#dnnl\\_query\\_num\\_of\\_inputs\\_s32 and #dnnl\\_query\\_num\\_of\\_outputs\\_s32 respectively) + +# Arguments +* `primitive_desc`: Primitive descriptor. +* `what`: Parameter to query. +* `index`: Index of the parameter to query for. +* `result`: Output result. The type depends on the query. For example, it must be a [`dnnl_memory_desc_t`](@ref)* if querying for a memory descriptor. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +# See also +[`dnnl_query_t`](@ref) for more options +""" +function dnnl_primitive_desc_query(primitive_desc, what, index, result) + @ccall libdnnl.dnnl_primitive_desc_query(primitive_desc::const_dnnl_primitive_desc_t, what::dnnl_query_t, index::Cint, result::Ptr{Cvoid})::dnnl_status_t +end + +""" + dnnl_primitive_desc_query_md(primitive_desc, what, index) + +Queries primitive descriptor for a memory descriptor. + +!!! note + + This function is a convenience version of #[`dnnl_primitive_desc_query`](@ref)(). + +# Arguments +* `primitive_desc`: Primitive descriptor. +* `what`: Kind of memory descriptor parameter to query for. +* `index`: Index of the parameter to query. +# Returns +NULL in case of any error. +""" +function dnnl_primitive_desc_query_md(primitive_desc, what, index) + @ccall libdnnl.dnnl_primitive_desc_query_md(primitive_desc::const_dnnl_primitive_desc_t, what::dnnl_query_t, index::Cint)::const_dnnl_memory_desc_t +end + +""" + dnnl_primitive_desc_query_s32(primitive_desc, what, index) + +Queries primitive descriptor for a signed 32bit int. + +!!! note + + This function is a convenience version of #[`dnnl_primitive_desc_query`](@ref)(). + +# Arguments +* `primitive_desc`: Primitive descriptor. +* `what`: Kind of the value to query for. +* `index`: Index of the parameter to query. 
+# Returns +0 in case of any error (in particular if the queried entity is not of type int32\\_t). Note that 0 may also be the actual returned value. +""" +function dnnl_primitive_desc_query_s32(primitive_desc, what, index) + @ccall libdnnl.dnnl_primitive_desc_query_s32(primitive_desc::const_dnnl_primitive_desc_t, what::dnnl_query_t, index::Cint)::Cint +end + +""" + dnnl_primitive_create(primitive, primitive_desc) + +Creates a primitive. + +# Arguments +* `primitive`: Output primitive. +* `primitive_desc`: Primitive descriptor used to create the primitive. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_create(primitive, primitive_desc) + @ccall libdnnl.dnnl_primitive_create(primitive::Ptr{dnnl_primitive_t}, primitive_desc::const_dnnl_primitive_desc_t)::dnnl_status_t +end + +""" + dnnl_primitive_create_from_cache_blob(primitive, primitive_desc, size, cache_blob) + +Creates a primitive from a cache blob. + +# Arguments +* `primitive`: Output primitive. +* `primitive_desc`: Primitive descriptor used to create the primitive. +* `size`: Size of the cache blob in bytes. +* `cache_blob`: Cache blob of size `size`. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_create_from_cache_blob(primitive, primitive_desc, size, cache_blob) + @ccall libdnnl.dnnl_primitive_create_from_cache_blob(primitive::Ptr{dnnl_primitive_t}, primitive_desc::const_dnnl_primitive_desc_t, size::Csize_t, cache_blob::Ptr{UInt8})::dnnl_status_t +end + +""" + dnnl_primitive_execute(primitive, stream, nargs, args) + +!!! note + + If any argument in `args` is padded (padded\\_dims > dims), the primitive execution will assume properly zero-padded input arguments, and produce zero-padded output arguments. 
+""" +function dnnl_primitive_execute(primitive, stream, nargs, args) + @ccall libdnnl.dnnl_primitive_execute(primitive::const_dnnl_primitive_t, stream::dnnl_stream_t, nargs::Cint, args::Ptr{dnnl_exec_arg_t})::dnnl_status_t +end + +""" + dnnl_primitive_get_primitive_desc(primitive, primitive_desc) + +Retrieves a constant reference to the primitive descriptor of a given primitive. + +!!! warning + + It is an error to destroy the returned object. It is owned by the primitive. The `const` qualifier of the returned object prevents such attempts. + +# Arguments +* `primitive`: Primitive to query for the primitive descriptor. +* `primitive_desc`: Output primitive descriptor. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_get_primitive_desc(primitive, primitive_desc) + @ccall libdnnl.dnnl_primitive_get_primitive_desc(primitive::const_dnnl_primitive_t, primitive_desc::Ptr{const_dnnl_primitive_desc_t})::dnnl_status_t +end + +""" + dnnl_primitive_get_cache_blob(primitive, size, cache_blob) + +Retrieves a cache blob associated with the given primitive. + +!!! note + + The cache blob can be empty. It's the user's responsibility to check whether it's empty prior to passing it to #[`dnnl_primitive_create_from_cache_blob`](@ref)(). + +# Arguments +* `primitive`: Primitive to query for the cache blob. +* `size`: Size of the cache blob in bytes. +* `cache_blob`: Cache blob of size `size`. If the `cache_blob` is nullptr then the size of the cache blob is returned in `size`. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_get_cache_blob(primitive, size, cache_blob) + @ccall libdnnl.dnnl_primitive_get_cache_blob(primitive::const_dnnl_primitive_t, size::Ptr{Csize_t}, cache_blob::Ptr{UInt8})::dnnl_status_t +end + +""" + dnnl_primitive_destroy(primitive) + +Destroys a primitive. + +# Arguments +* `primitive`: The primitive to destroy. 
+# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_destroy(primitive) + @ccall libdnnl.dnnl_primitive_destroy(primitive::dnnl_primitive_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_create(attr) + +Creates an empty (default) primitive attributes with all the parameters set to their default values. + +Empty attributes are implied whenever the respective argument is NULL. + +# Arguments +* `attr`: Output primitive attributes. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_create(attr) + @ccall libdnnl.dnnl_primitive_attr_create(attr::Ptr{dnnl_primitive_attr_t})::dnnl_status_t +end + +""" + dnnl_primitive_attr_clone(attr, existing_attr) + +Clones primitive attributes. + +# Arguments +* `attr`: Output primitive attributes. +* `existing_attr`: Primitive attributes to clone. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_clone(attr, existing_attr) + @ccall libdnnl.dnnl_primitive_attr_clone(attr::Ptr{dnnl_primitive_attr_t}, existing_attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_destroy(attr) + +Destroys primitive attributes. + +# Arguments +* `attr`: Primitive attributes to destroy. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_destroy(attr) + @ccall libdnnl.dnnl_primitive_attr_destroy(attr::dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_fpmath_mode(attr, mode) + +Returns the floating-point math mode primitive attribute. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: Output FP math mode. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_primitive_attr_get_fpmath_mode(attr, mode) + @ccall libdnnl.dnnl_primitive_attr_get_fpmath_mode(attr::const_dnnl_primitive_attr_t, mode::Ptr{dnnl_fpmath_mode_t})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_fpmath_mode(attr, mode) + +Sets the floating-point math mode primitive attributes. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: FP math mode. The possible values are: #dnnl\\_fpmath\\_mode\\_strict (default), #dnnl\\_fpmath\\_mode\\_bf16, #dnnl\\_fpmath\\_mode\\_f16, #dnnl\\_fpmath\\_mode\\_tf32, #dnnl\\_fpmath\\_mode\\_any. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_fpmath_mode(attr, mode) + @ccall libdnnl.dnnl_primitive_attr_set_fpmath_mode(attr::dnnl_primitive_attr_t, mode::dnnl_fpmath_mode_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_fpmath_mode_v2(attr, mode, apply_to_int) + +Returns the floating-point math mode primitive attribute. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: Output FP math mode. +* `apply_to_int`: Output use floating-point arithmetic for integer primitives. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_get_fpmath_mode_v2(attr, mode, apply_to_int) + @ccall libdnnl.dnnl_primitive_attr_get_fpmath_mode_v2(attr::const_dnnl_primitive_attr_t, mode::Ptr{dnnl_fpmath_mode_t}, apply_to_int::Ptr{Cint})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_fpmath_mode_v2(attr, mode, apply_to_int) + +Sets the floating-point math mode primitive attributes. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: FP math mode. The possible values are: #dnnl\\_fpmath\\_mode\\_strict (default), #dnnl\\_fpmath\\_mode\\_bf16, #dnnl\\_fpmath\\_mode\\_f16, #dnnl\\_fpmath\\_mode\\_tf32, #dnnl\\_fpmath\\_mode\\_any. +* `apply_to_int`: Boolean. Use of floating-point arithmetic for integer primitives. 
+# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_fpmath_mode_v2(attr, mode, apply_to_int) + @ccall libdnnl.dnnl_primitive_attr_set_fpmath_mode_v2(attr::dnnl_primitive_attr_t, mode::dnnl_fpmath_mode_t, apply_to_int::Cint)::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_deterministic(attr, value) + +Returns the deterministic primitive attribute value. + +# Arguments +* `attr`: Primitive attributes. +* `value`: Output deterministic attribute value +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_get_deterministic(attr, value) + @ccall libdnnl.dnnl_primitive_attr_get_deterministic(attr::const_dnnl_primitive_attr_t, value::Ptr{Cint})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_deterministic(attr, value) + +Sets the deterministic primitive attribute value. + +# Arguments +* `attr`: Primitive attributes. +* `value`: Boolean value to set deterministic attribute. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_deterministic(attr, value) + @ccall libdnnl.dnnl_primitive_attr_set_deterministic(attr::dnnl_primitive_attr_t, value::Cint)::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_accumulation_mode(attr, mode) + +Returns the accumulation mode primitive attribute. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: Output accumulation mode. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_get_accumulation_mode(attr, mode) + @ccall libdnnl.dnnl_primitive_attr_get_accumulation_mode(attr::const_dnnl_primitive_attr_t, mode::Ptr{dnnl_accumulation_mode_t})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_accumulation_mode(attr, mode) + +Sets the accumulation mode primitive attribute. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: Accumulation mode. 
The possible values are: #dnnl\\_accumulation\\_mode\\_strict (default), which is s32 for quantized primitives, f32/f64 otherwise #dnnl\\_accumulation\\_mode\\_relaxed, which is same as strict but allows intermediate accumulators to be in src/dst datatype #dnnl\\_accumulation\\_mode\\_any, which allows accumulators to be src/dst datatype or any wider type. #dnnl\\_accumulation\\_mode\\_f32, #dnnl\\_accumulation\\_mode\\_s32, #dnnl\\_accumulation\\_mode\\_f16. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_accumulation_mode(attr, mode) + @ccall libdnnl.dnnl_primitive_attr_set_accumulation_mode(attr::dnnl_primitive_attr_t, mode::dnnl_accumulation_mode_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_scratchpad_mode(attr, mode) + +Returns the primitive attributes scratchpad mode. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: Output scratchpad mode. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_get_scratchpad_mode(attr, mode) + @ccall libdnnl.dnnl_primitive_attr_get_scratchpad_mode(attr::const_dnnl_primitive_attr_t, mode::Ptr{dnnl_scratchpad_mode_t})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_scratchpad_mode(attr, mode) + +Sets primitive attributes scratchpad mode. + +# Arguments +* `attr`: Primitive attributes. +* `mode`: Scratchpad mode. The possible values are: #dnnl\\_scratchpad\\_mode\\_library (default) and #dnnl\\_scratchpad\\_mode\\_user. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_scratchpad_mode(attr, mode) + @ccall libdnnl.dnnl_primitive_attr_set_scratchpad_mode(attr::dnnl_primitive_attr_t, mode::dnnl_scratchpad_mode_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_scales_mask(attr, arg, mask) + +Sets primitive attributes scaling factors for primitive operations for a given memory argument. 
The scaling factors must be passed at execution time as an argument with index #[`DNNL_ARG_ATTR_SCALES`](@ref) | arg. + +# Arguments +* `attr`: Primitive attributes. +* `arg`: Parameter argument index as passed to the [`dnnl_primitive_execute`](@ref)() call. +* `mask`: Scaling factors correspondence mask that defines the correspondence between the tensor dimensions and the `scales` array. The set i-th bit indicates that a dedicated scaling factor is used for each index along that dimension. Set the mask to 0 to use a common scaling factor for the whole output tensor. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +# See also +[`dnnl_primitive_attr_set_scales_mask`](@ref) +""" +function dnnl_primitive_attr_set_scales_mask(attr, arg, mask) + @ccall libdnnl.dnnl_primitive_attr_set_scales_mask(attr::dnnl_primitive_attr_t, arg::Cint, mask::Cint)::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_scales(attr, arg, mask, ndims, group_dims, data_type) + +Sets primitive attributes scaling factors for primitive operations for a given memory argument. The scaling factors must be passed at execution time as an argument with index #[`DNNL_ARG_ATTR_SCALES`](@ref) | arg. + +# Arguments +* `attr`: Primitive attributes. +* `arg`: Parameter argument index as passed to the [`dnnl_primitive_execute`](@ref)() call. +* `mask`: Scaling factors correspondence mask that defines the correspondence between the tensor dimensions and the `scales` array. The set i-th bit indicates that a dedicated scaling factor is used for each index along that dimension. Set the mask to 0 to use a common scaling factor for the whole output tensor. +* `ndims`: Number of group dimensions. +* `group_dims`: Scaling factors correspondence groups that define the correspondence between the tensor dimensions and the scales array. The group dimensions should only be provided for each logical dimension that has correspondence mask `mask` set. 
+* `data_type`: Scaling factors data\\_type. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +# See also +[`dnnl_primitive_attr_set_scales`](@ref) +""" +function dnnl_primitive_attr_set_scales(attr, arg, mask, ndims, group_dims, data_type) + @ccall libdnnl.dnnl_primitive_attr_set_scales(attr::dnnl_primitive_attr_t, arg::Cint, mask::Cint, ndims::Cint, group_dims::Ptr{Clong}, data_type::dnnl_data_type_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_zero_points_mask(attr, arg, mask) + +Sets primitive attributes zero points for primitive operations for a given memory argument. The zero points must be passed at execution time as an argument with index #[`DNNL_ARG_ATTR_ZERO_POINTS`](@ref) | arg. + +# Arguments +* `attr`: Primitive attributes. +* `arg`: Parameter argument index as passed to the [`dnnl_primitive_execute`](@ref)() call. +* `mask`: Zero point correspondence mask that defines the correspondence between the tensor dimensions and the `zero_points` array. The set i-th bit indicates that a dedicated zero point is used for each index along that dimension. Set the mask to 0 to use a common zero point for the whole output tensor. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +# See also +[`dnnl_primitive_attr_set_zero_points_mask`](@ref) +""" +function dnnl_primitive_attr_set_zero_points_mask(attr, arg, mask) + @ccall libdnnl.dnnl_primitive_attr_set_zero_points_mask(attr::dnnl_primitive_attr_t, arg::Cint, mask::Cint)::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_zero_points(attr, arg, mask, ndims, group_dims, data_type) + +Sets primitive attributes zero points for primitive operations for a given memory argument. The zero points must be passed at execution time as an argument with index #[`DNNL_ARG_ATTR_ZERO_POINTS`](@ref) | arg. + +# Arguments +* `attr`: Primitive attributes. +* `arg`: Parameter argument index as passed to the [`dnnl_primitive_execute`](@ref)() call. 
+* `mask`: Zero point correspondence mask that defines the correspondence between the tensor dimensions and the `zero_points` array. The set i-th bit indicates that a dedicated zero point is used for each index along that dimension. Set the mask to 0 to use a common zero point for the whole output tensor. +* `ndims`: Number of group dimensions. +* `group_dims`: Zero point factors correspondence groups that define the correspondence between the tensor dimensions and the zero\\_points array. The group dimensions should be only provided for each logical dimension that has the bit set correspondence mask `mask` set. +* `data_type`: Zero points factors data\\_type. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +# See also +[`dnnl_primitive_attr_set_zero_points`](@ref) +""" +function dnnl_primitive_attr_set_zero_points(attr, arg, mask, ndims, group_dims, data_type) + @ccall libdnnl.dnnl_primitive_attr_set_zero_points(attr::dnnl_primitive_attr_t, arg::Cint, mask::Cint, ndims::Cint, group_dims::Ptr{Clong}, data_type::dnnl_data_type_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_post_ops(attr, post_ops) + +Returns primitive attributes post-ops. + +!!! warning + + The output `post_ops` points to the internal `attr` field, so it is an error to modify or destroy them. The lifetime of `post_ops` is the same as that of the `attr` it belongs to, so it is an error to use `post_ops` after `attr` has been destroyed. + +# Arguments +* `attr`: Primitive attributes. +* `post_ops`: Output post-ops. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_get_post_ops(attr, post_ops) + @ccall libdnnl.dnnl_primitive_attr_get_post_ops(attr::const_dnnl_primitive_attr_t, post_ops::Ptr{const_dnnl_post_ops_t})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_post_ops(attr, post_ops) + +Sets primitive attributes post-ops. + +!!! 
note + + There is no way to check whether the post-ops would be supported by the target primitive. Any error will be reported by the dnnl\\_\\_[propagation kind]\\_primitive\\_desc\\_create() function call. + +# Arguments +* `attr`: Primitive attributes. +* `post_ops`: Post-ops to set. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_post_ops(attr, post_ops) + @ccall libdnnl.dnnl_primitive_attr_set_post_ops(attr::dnnl_primitive_attr_t, post_ops::const_dnnl_post_ops_t)::dnnl_status_t +end + +""" + dnnl_post_ops_create(post_ops) + +Creates empty post-ops sequence. + +# Arguments +* `post_ops`: Output post-ops. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_post_ops_create(post_ops) + @ccall libdnnl.dnnl_post_ops_create(post_ops::Ptr{dnnl_post_ops_t})::dnnl_status_t +end + +""" + dnnl_post_ops_clone(post_ops, existing_post_ops) + +Clones post-ops primitive attribute. + +# Arguments +* `post_ops`: Output post-ops primitive attribute. +* `existing_post_ops`: Post-ops primitive attribute to clone. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_post_ops_clone(post_ops, existing_post_ops) + @ccall libdnnl.dnnl_post_ops_clone(post_ops::Ptr{dnnl_post_ops_t}, existing_post_ops::const_dnnl_post_ops_t)::dnnl_status_t +end + +""" + dnnl_post_ops_destroy(post_ops) + +Destroys post-ops. + +# Arguments +* `post_ops`: Post-ops to destroy. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_post_ops_destroy(post_ops) + @ccall libdnnl.dnnl_post_ops_destroy(post_ops::dnnl_post_ops_t)::dnnl_status_t +end + +""" + dnnl_post_ops_len(post_ops) + +Returns the length of post-ops. + +# Arguments +* `post_ops`: Post-ops. +# Returns +The number of post-ops entries. 
+""" +function dnnl_post_ops_len(post_ops) + @ccall libdnnl.dnnl_post_ops_len(post_ops::const_dnnl_post_ops_t)::Cint +end + +""" + dnnl_post_ops_get_kind(post_ops, index) + +Returns the kind of a post-op entry. + +# Arguments +* `post_ops`: Post-ops. +* `index`: Post-op entry index. +# Returns +#dnnl\\_undefined\\_primitive if there is no post-op at the specified index. +""" +function dnnl_post_ops_get_kind(post_ops, index) + @ccall libdnnl.dnnl_post_ops_get_kind(post_ops::const_dnnl_post_ops_t, index::Cint)::dnnl_primitive_kind_t +end + +""" + dnnl_post_ops_append_sum(post_ops, scale, zero_point, data_type) + +Appends an accumulation v3 (sum) to post-ops. Prior to accumulating the result, a zero point is subtracted from the previous value and is multiplied by the scale. + +The kind of this post-op is #dnnl\\_sum. + +This feature may improve performance for cases like dequantize the asymmetrically quantized sum's src1 tensor to f32 domain before performing the sum operation by subtracting the `zero_point` before the scaling. + +In the simplest case where accumulation is the only post-op, the computations will be: + +dst[:] <- scale * (dst[:] - zero\\_point) + op(...) // instead of dst[:] <- op(...) + +If `data_type` is specified, original dst tensor will be reinterpreted as a tensor with provided data type. Since it is reinterpretation, data\\_type and dst data type should have the same size. As a result, computations will be: + +dst[:] <- scale * (as\\_data\\_type(dst[:]) - zero\\_point) + op(...) // instead of dst[:] <- op(...) + +!!! note + + This post-op executes in-place and does not change the destination layout. + +# Arguments +* `post_ops`: Post-ops. +* `scale`: Accumulation scaling factor. +* `zero_point`: Single scalar int32\\_t value of zero point. +* `data_type`: Accumulation data\\_type. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_post_ops_append_sum(post_ops, scale, zero_point, data_type) + @ccall libdnnl.dnnl_post_ops_append_sum(post_ops::dnnl_post_ops_t, scale::Cfloat, zero_point::Int32, data_type::dnnl_data_type_t)::dnnl_status_t +end + +""" + dnnl_post_ops_get_params_sum(post_ops, index, scale, zero_point, data_type) + +Returns the parameters of an accumulation (sum) post-op with zero point and data type parameter. + +# Arguments +* `post_ops`: Post-ops. +* `index`: Index of the sum post-op. +* `scale`: Output accumulation scaling factor. +* `zero_point`: Zero point. +* `data_type`: Data type for accumulation. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_post_ops_get_params_sum(post_ops, index, scale, zero_point, data_type) + @ccall libdnnl.dnnl_post_ops_get_params_sum(post_ops::const_dnnl_post_ops_t, index::Cint, scale::Ptr{Cfloat}, zero_point::Ptr{Int32}, data_type::Ptr{dnnl_data_type_t})::dnnl_status_t +end + +""" + dnnl_post_ops_append_eltwise(post_ops, alg_kind, alpha, beta) + +Appends an elementwise post-op. + +The kind of this post operation is #dnnl\\_eltwise. + +In the simplest case when the elementwise is the only post operation, the computations would be: + +dst[:] <- eltwise\\_op (op(...)) // instead of dst[:] <- op(...) + +where eltwise\\_op is configured with the given parameters. + +# Arguments +* `post_ops`: Post-ops. +* `alg_kind`: Elementwise algorithm for the post-op. +* `alpha`: Alpha parameter for the elementwise algorithm. +* `beta`: Beta parameter for the elementwise algorithm. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_post_ops_append_eltwise(post_ops, alg_kind, alpha, beta) + @ccall libdnnl.dnnl_post_ops_append_eltwise(post_ops::dnnl_post_ops_t, alg_kind::dnnl_alg_kind_t, alpha::Cfloat, beta::Cfloat)::dnnl_status_t +end + +""" + dnnl_post_ops_get_params_eltwise(post_ops, index, alg_kind, alpha, beta) + +Returns the parameters of an elementwise post-op. + +# Arguments +* `post_ops`: Post-ops. +* `index`: Index of the elementwise post-op. +* `alg_kind`: Output elementwise algorithm kind. +* `alpha`: Output alpha parameter for the elementwise algorithm. +* `beta`: Output beta parameter for the elementwise algorithm. +# Returns +#dnnl\\_invalid\\_arguments if `index` does not refer to an elementwise post-op. +""" +function dnnl_post_ops_get_params_eltwise(post_ops, index, alg_kind, alpha, beta) + @ccall libdnnl.dnnl_post_ops_get_params_eltwise(post_ops::const_dnnl_post_ops_t, index::Cint, alg_kind::Ptr{dnnl_alg_kind_t}, alpha::Ptr{Cfloat}, beta::Ptr{Cfloat})::dnnl_status_t +end + +""" + dnnl_post_ops_append_dw(post_ops, weights_data_type, bias_data_type, dst_data_type, kernel_size, stride_size, padding_l_size) + +Appends a depthwise post-op convolution. + +This post-op can only be fused with a 2D 1x1 convolution (convolution with weights spatial dimensions equal to 1 i.e., kh=kw=1). + +The kind of this post-op is #dnnl\\_convolution. + +The number of outputs for primitive with fusion is one. The output spatial size can be derived as below: + +output\\_height = ceil(output\\_height\\_1x1\\_convolution, stride) output\\_width = ceil(output\\_width\\_1x1\\_convolution, stride) + +See dev_guide_attributes_post_ops_depthwise and dev_guide_attributes_post_ops_depthwise_fusion for more info. + +# Arguments +* `post_ops`: Post-ops. 
+* `weights_data_type`: Weights data type of depthwise post-op +* `bias_data_type`: Bias data type of depthwise post-op +* `dst_data_type`: Output data type of depthwise post-op +* `kernel_size`: Size of kernel of depthwise post-op +* `stride_size`: Size of stride of depthwise post-op +* `padding_l_size`: Size of left and top paddings of depthwise post-op +# Returns +#dnnl\\_success on success and a status describing the error otherwise +""" +function dnnl_post_ops_append_dw(post_ops, weights_data_type, bias_data_type, dst_data_type, kernel_size, stride_size, padding_l_size) + @ccall libdnnl.dnnl_post_ops_append_dw(post_ops::dnnl_post_ops_t, weights_data_type::dnnl_data_type_t, bias_data_type::dnnl_data_type_t, dst_data_type::dnnl_data_type_t, kernel_size::dnnl_dim_t, stride_size::dnnl_dim_t, padding_l_size::dnnl_dim_t)::dnnl_status_t +end + +""" + dnnl_post_ops_get_params_dw(post_ops, index, weights_data_type, bias_data_type, dst_data_type, kernel_size, stride_size, padding_l_size) + +Returns the parameters of a depthwise post-op. + +# Arguments +* `post_ops`: Post-ops. +* `index`: Index of the depthwise post-op. 
+* `weights_data_type`: Weights data type of depthwise post-op +* `bias_data_type`: Bias data type of depthwise post-op +* `dst_data_type`: Output data type of depthwise post-op +* `kernel_size`: Size of kernel of depthwise post-op +* `stride_size`: Size of stride of depthwise post-op +* `padding_l_size`: Size of left and top paddings of depthwise post-op +# Returns +#dnnl\\_success on success and a status describing the error otherwise +""" +function dnnl_post_ops_get_params_dw(post_ops, index, weights_data_type, bias_data_type, dst_data_type, kernel_size, stride_size, padding_l_size) + @ccall libdnnl.dnnl_post_ops_get_params_dw(post_ops::const_dnnl_post_ops_t, index::Cint, weights_data_type::Ptr{dnnl_data_type_t}, bias_data_type::Ptr{dnnl_data_type_t}, dst_data_type::Ptr{dnnl_data_type_t}, kernel_size::Ptr{dnnl_dim_t}, stride_size::Ptr{dnnl_dim_t}, padding_l_size::Ptr{dnnl_dim_t})::dnnl_status_t +end + +""" + dnnl_post_ops_append_binary(post_ops, alg_kind, src1_desc) + +Appends a binary post-op. + +The kind of this post operation is #dnnl\\_binary. + +In the simplest case when the binary is the only post operation, the computations would be: + +dst[:] <- binary\\_op (dst[:], another\\_input[:]) + +where binary\\_op is configured with the given parameters. binary\\_op supports broadcast semantics for a second operand. + +# Arguments +* `post_ops`: Post-ops. +* `alg_kind`: Binary algorithm for the post-op. +* `src1_desc`: Memory descriptor of a second operand. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_post_ops_append_binary(post_ops, alg_kind, src1_desc) + @ccall libdnnl.dnnl_post_ops_append_binary(post_ops::dnnl_post_ops_t, alg_kind::dnnl_alg_kind_t, src1_desc::const_dnnl_memory_desc_t)::dnnl_status_t +end + +""" + dnnl_post_ops_get_params_binary(post_ops, index, alg_kind, src1_desc) + +Returns the parameters of a binary post-op. + +# Arguments +* `post_ops`: Post-ops. 
+* `index`: Index of the binary post-op. +* `alg_kind`: Output binary algorithm kind. +* `src1_desc`: Output memory descriptor of a second operand. +# Returns +#dnnl\\_invalid\\_arguments if `index` does not refer to a binary post-op. +""" +function dnnl_post_ops_get_params_binary(post_ops, index, alg_kind, src1_desc) + @ccall libdnnl.dnnl_post_ops_get_params_binary(post_ops::const_dnnl_post_ops_t, index::Cint, alg_kind::Ptr{dnnl_alg_kind_t}, src1_desc::Ptr{const_dnnl_memory_desc_t})::dnnl_status_t +end + +""" + dnnl_post_ops_append_prelu(post_ops, mask) + +Appends a prelu forward post-op. + +The kind of this post-op is #dnnl::primitive::kind::prelu. + +The post-op can be defined as: + +dst[:] <- prelu(dst[:], weights[:]) prelu: dst[:] <- dst[:] if dst[:] > 0 dst[:] <- dst[:] * weights[:] if dst[:] <= 0 + +!!! note + + The order of dimensions does not depend on how elements are laid out in memory. For example: - for a 2D CNN activations tensor the order is always (n, c) - for a 4D CNN activations tensor the order is always (n, c, h, w) - for a 5D CNN weights tensor the order is always (g, oc, ic, kh, kw) + +Prelu weights tensor is passed in runtime execution phase. Prelu weights tensor data type is implicitly assumed as f32 using plain layout (a, ab, acb, acdb, acdeb) + +# Arguments +* `post_ops`: Post-ops. +* `mask`: Defines the correspondence between the output tensor dimensions and the prelu weights tensor. The set i-th bit indicates that a dedicated weights value is used for each index along that dimension. Set the mask to 0 to use a common weights value for the whole output tensor. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_post_ops_append_prelu(post_ops, mask) + @ccall libdnnl.dnnl_post_ops_append_prelu(post_ops::dnnl_post_ops_t, mask::Cint)::dnnl_status_t +end + +""" + dnnl_post_ops_get_params_prelu(post_ops, index, mask) + +Returns the parameters of a prelu post-op. 
+ +# Arguments +* `post_ops`: Post-ops. +* `index`: Index of the prelu post-op. +* `mask`: Mask of the prelu post-op. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_post_ops_get_params_prelu(post_ops, index, mask) + @ccall libdnnl.dnnl_post_ops_get_params_prelu(post_ops::const_dnnl_post_ops_t, index::Cint, mask::Ptr{Cint})::dnnl_status_t +end + +""" + dnnl_memory_desc_destroy(memory_desc) + +Destroys a memory descriptor. + +# Arguments +* `memory_desc`: Memory descriptor to destroy. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_destroy(memory_desc) + @ccall libdnnl.dnnl_memory_desc_destroy(memory_desc::dnnl_memory_desc_t)::dnnl_status_t +end + +""" + dnnl_memory_desc_clone(memory_desc, existing_memory_desc) + +Clones a memory descriptor. The resulting memory descriptor must be destroyed separately. + +# Arguments +* `memory_desc`: Output memory descriptor. +* `existing_memory_desc`: Memory descriptor to clone. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_clone(memory_desc, existing_memory_desc) + @ccall libdnnl.dnnl_memory_desc_clone(memory_desc::Ptr{dnnl_memory_desc_t}, existing_memory_desc::const_dnnl_memory_desc_t)::dnnl_status_t +end + +""" + dnnl_memory_desc_get_blob(blob, size, memory_desc) + +Retrieves a binary blob associated with the given memory descriptor + +# Arguments +* `blob`: Output pointer to binary blob. If not nullptr, size bytes of the memory descriptor blob are written. +* `size`: Output pointer to the size of the binary blob in bytes. Size is written if blob is nullptr. +* `memory_desc`: input memory descriptor to serialize +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_memory_desc_get_blob(blob, size, memory_desc) + @ccall libdnnl.dnnl_memory_desc_get_blob(blob::Ptr{UInt8}, size::Ptr{Csize_t}, memory_desc::const_dnnl_memory_desc_t)::dnnl_status_t +end + +""" + dnnl_memory_desc_create_with_blob(memory_desc, blob) + +Creates a memory descriptor from a memory descriptor binary blob. + +# Arguments +* `memory_desc`: Output pointer to a newly allocated memory descriptor. +* `blob`: Pointer to a memory descriptor binary blob. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_create_with_blob(memory_desc, blob) + @ccall libdnnl.dnnl_memory_desc_create_with_blob(memory_desc::Ptr{dnnl_memory_desc_t}, blob::Ptr{UInt8})::dnnl_status_t +end + +""" + dnnl_memory_desc_create_with_strides(memory_desc, ndims, dims, data_type, strides) + +Creates a memory descriptor using dimensions and strides. + +!!! note + + As always, the logical order of dimensions corresponds to the `abc...` format tag, and the physical meaning of the dimensions depends on both the primitive that consumes the memory and the context of that consumption. + +# Arguments +* `memory_desc`: Output memory descriptor. +* `ndims`: Number of dimensions +* `dims`: Array of dimensions. +* `data_type`: Elements data type. +* `strides`: Strides in each dimension. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_create_with_strides(memory_desc, ndims, dims, data_type, strides) + @ccall libdnnl.dnnl_memory_desc_create_with_strides(memory_desc::Ptr{dnnl_memory_desc_t}, ndims::Cint, dims::Ptr{Clong}, data_type::dnnl_data_type_t, strides::Ptr{Clong})::dnnl_status_t +end + +""" + dnnl_memory_desc_create_with_tag(memory_desc, ndims, dims, data_type, tag) + +Creates a memory descriptor using dimensions and memory format tag. + +!!! 
note + + As always, the logical order of dimensions corresponds to the `abc...` format tag, and the physical meaning of the dimensions depends on both the primitive that consumes the memory and the context of that consumption. + +# Arguments +* `memory_desc`: Output memory descriptor. +* `ndims`: Number of dimensions +* `dims`: Array of dimensions. +* `data_type`: Elements data type. +* `tag`: Memory format tag. Can be #dnnl\\_format\\_tag\\_any which would allow a primitive to chose the final memory format. In this case the format\\_kind field of the memory descriptor would be set to #dnnl\\_format\\_kind\\_any. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_create_with_tag(memory_desc, ndims, dims, data_type, tag) + @ccall libdnnl.dnnl_memory_desc_create_with_tag(memory_desc::Ptr{dnnl_memory_desc_t}, ndims::Cint, dims::Ptr{Clong}, data_type::dnnl_data_type_t, tag::dnnl_format_tag_t)::dnnl_status_t +end + +""" + dnnl_memory_desc_create_submemory(memory_desc, parent_memory_desc, dims, offsets) + +# Arguments +* `memory_desc`: Output memory descriptor. +* `parent_memory_desc`: An existing memory descriptor. +* `dims`: Sizes of the region. +* `offsets`: Offsets to the region from the encompassing memory object in each dimension +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_create_submemory(memory_desc, parent_memory_desc, dims, offsets) + @ccall libdnnl.dnnl_memory_desc_create_submemory(memory_desc::Ptr{dnnl_memory_desc_t}, parent_memory_desc::const_dnnl_memory_desc_t, dims::Ptr{Clong}, offsets::Ptr{Clong})::dnnl_status_t +end + +""" + dnnl_memory_desc_reshape(out_memory_desc, in_memory_desc, ndims, dims) + +Creates a memory descriptor by reshaping an existing one. The new memory descriptor inherits the data type. 
This operation is valid only for memory descriptors that have format\\_kind #dnnl\\_blocked or #dnnl\\_format\\_kind\\_any. + +The resulting memory descriptor must be destroyed separately. + +The operation ensures the transformation of the physical memory format corresponds to the transformation of the logical dimensions. If such transformation is impossible, the function returns #dnnl\\_invalid\\_arguments. + +The reshape operation can be described as a combination of the following basic operations: 1. Add a dimension of size `1`. This is always possible. 2. Remove a dimension of size `1`. This is possible only if the dimension has no padding (i.e. `padded\\_dims[dim] == dims[dim] && dims[dim] == 1`). 3. Split a dimension into multiple ones. This is possible only if the size of the dimension is exactly equal to the product of the split ones and the dimension does not have padding (i.e. `padded\\_dims[dim] = dims[dim]`). 4. Joining multiple consecutive dimensions into a single one. As in the cases above, this requires that the dimensions do not have padding and that the memory format is such that in physical memory these dimensions are dense and have the same order as their logical counterparts. This also assumes that these dimensions are not blocked. - Here, dense means: `stride for dim[i] == (stride for dim[i + 1]) * dim[i + 1]`; - And same order means: `i < j` if and only if `stride for dim[j] <= stride for dim[i]`. + +!!! warning + + Some combinations of physical memory layout and/or offsets or dimensions may result in a failure to make a reshape. + +# Arguments +* `out_memory_desc`: Output memory descriptor. +* `in_memory_desc`: An existing memory descriptor. Must have format\\_kind set to #dnnl\\_blocked or #dnnl\\_format\\_kind\\_any. +* `ndims`: Number of dimensions for the output memory descriptor. +* `dims`: Dimensions for the output memory descriptor. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_memory_desc_reshape(out_memory_desc, in_memory_desc, ndims, dims) + @ccall libdnnl.dnnl_memory_desc_reshape(out_memory_desc::Ptr{dnnl_memory_desc_t}, in_memory_desc::const_dnnl_memory_desc_t, ndims::Cint, dims::Ptr{Clong})::dnnl_status_t +end + +""" + dnnl_memory_desc_permute_axes(out_memory_desc, in_memory_desc, permutation) + +Creates a memory descriptor by permuting axes in an existing one. + +The physical memory layout representation is adjusted accordingly to maintain the consistency between the logical and physical parts of the memory descriptor. + +The resulting memory descriptor must be destroyed separately. + +The new memory descriptor inherits the data type. This operation is valid only for memory descriptors that have format\\_kind set to #dnnl\\_blocked or #dnnl\\_format\\_kind\\_any. + +The logical axes will be permuted in the following manner: ``` for (i: 0 .. in\\_memory\\_desc->ndims) out\\_memory\\_desc->dims[permutation[i]] = in\\_memory\\_desc->dims[i]; ``` + +Example: + +```c++ + dnnl_memory_desc_t in_md, out_md, expect_out_md; + + const int permutation[] = {1, 0}; // swap the first and the second axes + + dnnl_dims_t in_dims = {2, 3}, out_dims = {3, 2}; + dnnl_format_tag_t in_tag = dnnl_ab, out_tag = dnnl_ba; + + dnnl_memory_desc_create_with_tag( + &in_md, 2, in_dims, data_type, in_tag); + dnnl_memory_desc_create_with_tag( + &expect_out_md, 2, out_dims, data_type, out_tag); + + dnnl_memory_desc_permute_axes(&out_md, in_md, permutation); + assert(dnnl_memory_desc_equal(out_md, expect_out_md)); + + dnnl_memory_desc_destroy(in_md); + dnnl_memory_desc_destroy(out_md); + dnnl_memory_desc_destroy(expect_out_md); +``` + +# Arguments +* `out_memory_desc`: Output memory descriptor. +* `in_memory_desc`: An existing memory descriptor. Must have format\\_kind set to #dnnl\\_blocked or #dnnl\\_format\\_kind\\_any. +* `permutation`: Axes permutation (of size `in\\_memory\\_desc->ndims`). 
+# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_permute_axes(out_memory_desc, in_memory_desc, permutation) + @ccall libdnnl.dnnl_memory_desc_permute_axes(out_memory_desc::Ptr{dnnl_memory_desc_t}, in_memory_desc::const_dnnl_memory_desc_t, permutation::Ptr{Cint})::dnnl_status_t +end + +""" + dnnl_memory_desc_query(memory_desc, what, result) + +Queries a memory descriptor for various pieces of information. + +The following information can be queried: - Number of dimensions (#dnnl\\_query\\_ndims\\_s32) - Dimensions (#dnnl\\_query\\_dims) in the following order: - CNN data tensors: mini-batch, channel, spatial ({N, C, [[D,] H,] W}) - CNN weight tensors: group (optional), output channel, input channel, spatial ({[G,] O, I, [[D,] H,] W}) - RNN data tensors: time, mini-batch, channels ({T, N, C}) or layers, directions, states, mini-batch, channels ({L, D, S, N, C}) - RNN weight tensor: layers, directions, input channel, gates, output channels ({L, D, I, G, O}) - Data type of the tensor elements (#dnnl\\_query\\_data\\_type) - Padded dimensions (#dnnl\\_query\\_padded\\_dims) - size of the data including padding in each dimension - Padded offsets (#dnnl\\_query\\_padded\\_offsets) - per-dimension offset from the padding to actual data, the top-level tensor with offsets applied must lie within the padding area. - Submemory offset (#dnnl\\_query\\_submemory\\_offset\\_s64) - offset from memory origin to the current block, non-zero only in a description of a memory sub-block. - Format kind (#dnnl\\_query\\_format\\_kind) - memory format kind + +!!! note + + The order of dimensions does not depend on the memory format, so whether the data is laid out in #dnnl\\_nchw or #dnnl\\_nhwc the dims for 4D CN data tensor would be {N, C, H, W}. + +The following queries are applicable only to format kind #dnnl\\_blocked. 
- Strides (#dnnl\\_query\\_strides) between the outermost blocks or in case of plain (non-blocked) formats the strides between dimensions - Number of innermost blocks (#dnnl\\_query\\_inner\\_nblks\\_s32), e.g. `{4, 16, 4}` in case of `OIhw_4i16o4i` - Size of the innermost blocks (#dnnl\\_query\\_inner\\_blks), e.g. 3 in case of `OIhw_4i16o4i_` - Logical indices of the blocks (#dnnl\\_query\\_inner\\_idxs), e.g. `{1, 0, 1}` in case of `4i16o4i`, because `i` is the 1st dim and `o` is the 0th dim + +# Arguments +* `memory_desc`: Memory descriptor. +* `what`: Parameter to query. +* `result`: Output result. The type depends on the query. For example, it must be a [`dnnl_dims_t`](@ref)** if querying for strides. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_desc_query(memory_desc, what, result) + @ccall libdnnl.dnnl_memory_desc_query(memory_desc::const_dnnl_memory_desc_t, what::dnnl_query_t, result::Ptr{Cvoid})::dnnl_status_t +end + +""" + dnnl_memory_desc_equal(lhs, rhs) + +Compares two memory descriptors. + +Use this function to identify whether a reorder is required between the two memories + +# Arguments +* `lhs`: Left-hand side of the comparison. +* `rhs`: Right-hand side of the comparison. +# Returns +0 if the descriptors are different. +""" +function dnnl_memory_desc_equal(lhs, rhs) + @ccall libdnnl.dnnl_memory_desc_equal(lhs::const_dnnl_memory_desc_t, rhs::const_dnnl_memory_desc_t)::Cint +end + +""" + dnnl_memory_desc_get_size(memory_desc) + +Returns the size of a memory descriptor. + +# Arguments +* `memory_desc`: Memory descriptor. +# Returns +The number of bytes required for memory described by a memory descriptor. +""" +function dnnl_memory_desc_get_size(memory_desc) + @ccall libdnnl.dnnl_memory_desc_get_size(memory_desc::const_dnnl_memory_desc_t)::Csize_t +end + +""" + dnnl_data_type_size(data_type) + +Returns the size of data type. + +# Arguments +* `data_type`: Data type. 
+# Returns +The number of bytes occupied by data type. +""" +function dnnl_data_type_size(data_type) + @ccall libdnnl.dnnl_data_type_size(data_type::dnnl_data_type_t)::Csize_t +end + +""" + dnnl_memory_create(memory, memory_desc, engine, handle) + +Creates a memory object. + +Unless `handle` is equal to [`DNNL_MEMORY_NONE`](@ref), the constructed memory object will have the underlying buffer set. In this case, the buffer will be initialized as if [`dnnl_memory_set_data_handle`](@ref)() had been called. + +# Arguments +* `memory`: Output memory object. +* `memory_desc`: Memory descriptor. +* `engine`: Engine to use. +* `handle`: Handle of the memory buffer to use as an underlying storage. - A pointer to the user-allocated buffer. In this case the library doesn't own the buffer. - The [`DNNL_MEMORY_ALLOCATE`](@ref) special value. Instructs the library to allocate the buffer for the memory object. In this case the library owns the buffer. - [`DNNL_MEMORY_NONE`](@ref) to create [`dnnl_memory`](@ref) without an underlying buffer. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +# See also +[`dnnl_memory_set_data_handle`](@ref)() +""" +function dnnl_memory_create(memory, memory_desc, engine, handle) + @ccall libdnnl.dnnl_memory_create(memory::Ptr{dnnl_memory_t}, memory_desc::const_dnnl_memory_desc_t, engine::dnnl_engine_t, handle::Ptr{Cvoid})::dnnl_status_t +end + +""" + dnnl_memory_get_memory_desc(memory, memory_desc) + +Returns the memory descriptor for a memory object. + +# Arguments +* `memory`: Memory object. +* `memory_desc`: Output memory descriptor (a copy). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_get_memory_desc(memory, memory_desc) + @ccall libdnnl.dnnl_memory_get_memory_desc(memory::const_dnnl_memory_t, memory_desc::Ptr{const_dnnl_memory_desc_t})::dnnl_status_t +end + +""" + dnnl_memory_get_engine(memory, engine) + +Returns the engine of a memory object. 
+ +# Arguments +* `memory`: Memory object. +* `engine`: Output engine on which the memory is located. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_get_engine(memory, engine) + @ccall libdnnl.dnnl_memory_get_engine(memory::const_dnnl_memory_t, engine::Ptr{dnnl_engine_t})::dnnl_status_t +end + +""" + dnnl_memory_map_data(memory, mapped_ptr) + +Maps a memory object and returns a host-side pointer to a memory buffer with a copy of its contents. + +Mapping enables explicit direct access to memory contents for the engines that do not support it implicitly. + +Mapping is an exclusive operation - a memory object cannot be used in other operations until this memory object is unmapped. + +!!! note + + Any primitives working with `memory` should be completed before the memory is mapped. Use [`dnnl_stream_wait`](@ref) to synchronize the corresponding execution stream. + +!!! note + + The [`dnnl_memory_map_data`](@ref)() and [`dnnl_memory_unmap_data`](@ref)() functions are mainly provided for debug and testing purposes, and their performance may be suboptimal. + +# Arguments +* `memory`: Memory object. +* `mapped_ptr`: Output pointer to the mapped buffer. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_map_data(memory, mapped_ptr) + @ccall libdnnl.dnnl_memory_map_data(memory::const_dnnl_memory_t, mapped_ptr::Ptr{Ptr{Cvoid}})::dnnl_status_t +end + +""" + dnnl_memory_unmap_data(memory, mapped_ptr) + +Unmaps a memory object and writes back any changes made to the previously mapped memory buffer. The pointer to the mapped buffer must be obtained via the [`dnnl_memory_map_data`](@ref)() call. + +!!! note + + The [`dnnl_memory_map_data`](@ref)() and [`dnnl_memory_unmap_data`](@ref)() functions are mainly provided for debug and testing purposes, and their performance may be suboptimal. + +# Arguments +* `memory`: Memory object. 
+* `mapped_ptr`: Pointer to the mapped buffer that must have been obtained using the [`dnnl_memory_map_data`](@ref)() function. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_unmap_data(memory, mapped_ptr) + @ccall libdnnl.dnnl_memory_unmap_data(memory::const_dnnl_memory_t, mapped_ptr::Ptr{Cvoid})::dnnl_status_t +end + +""" + dnnl_memory_get_data_handle(memory, handle) + +Returns memory object's data handle. + +# Arguments +* `memory`: Memory object. +* `handle`: Output data handle. For the CPU engine, the data handle is a pointer to the actual data. For OpenCL it is a cl\\_mem. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_get_data_handle(memory, handle) + @ccall libdnnl.dnnl_memory_get_data_handle(memory::const_dnnl_memory_t, handle::Ptr{Ptr{Cvoid}})::dnnl_status_t +end + +""" + dnnl_memory_set_data_handle(memory, handle) + +Sets the underlying memory buffer. + +# Arguments +* `memory`: Memory object. +* `handle`: Data handle. For the CPU engine or when USM is used, the memory buffer is a pointer to the actual data. For OpenCL it is a `cl_mem`. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_set_data_handle(memory, handle) + @ccall libdnnl.dnnl_memory_set_data_handle(memory::dnnl_memory_t, handle::Ptr{Cvoid})::dnnl_status_t +end + +""" + dnnl_memory_destroy(memory) + +Destroys a memory object. + +# Arguments +* `memory`: Memory object to destroy. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_memory_destroy(memory) + @ccall libdnnl.dnnl_memory_destroy(memory::dnnl_memory_t)::dnnl_status_t +end + +""" + dnnl_reorder_primitive_desc_create(reorder_primitive_desc, src_desc, src_engine, dst_desc, dst_engine, attr) + +Creates a primitive descriptor for a reorder primitive. 
+ +# Arguments +* `reorder_primitive_desc`: Output primitive descriptor. +* `src_desc`: Source memory descriptor. +* `src_engine`: Engine on which the source memory object will be located. +* `dst_desc`: Destination memory descriptor. +* `dst_engine`: Engine on which the destination memory object will be located. +* `attr`: Primitive attributes to use (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_reorder_primitive_desc_create(reorder_primitive_desc, src_desc, src_engine, dst_desc, dst_engine, attr) + @ccall libdnnl.dnnl_reorder_primitive_desc_create(reorder_primitive_desc::Ptr{dnnl_primitive_desc_t}, src_desc::const_dnnl_memory_desc_t, src_engine::dnnl_engine_t, dst_desc::const_dnnl_memory_desc_t, dst_engine::dnnl_engine_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_concat_primitive_desc_create(concat_primitive_desc, engine, dst_desc, n, concat_dimension, src_descs, attr) + +Creates a primitive descriptor for an out-of-place concatenation primitive. + +# Arguments +* `concat_primitive_desc`: Output primitive descriptor. +* `dst_desc`: Destination memory descriptor. +* `n`: Number of source parameters. +* `concat_dimension`: Source tensors will be concatenated over dimension with this index. Note that order of dimensions does not depend on memory format. +* `src_descs`: Array of source memory descriptors with `n` elements. +* `attr`: Primitive attributes to use (can be NULL). +* `engine`: Engine to use. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+"""
+function dnnl_concat_primitive_desc_create(concat_primitive_desc, engine, dst_desc, n, concat_dimension, src_descs, attr)
+    @ccall libdnnl.dnnl_concat_primitive_desc_create(concat_primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, dst_desc::const_dnnl_memory_desc_t, n::Cint, concat_dimension::Cint, src_descs::Ptr{const_dnnl_memory_desc_t}, attr::const_dnnl_primitive_attr_t)::dnnl_status_t
+end
+
+"""
+    dnnl_sum_primitive_desc_create(sum_primitive_desc, engine, dst_desc, n, scales, src_descs, attr)
+
+Creates a primitive descriptor for an (out-of-place) sum primitive.
+
+# Arguments
+* `sum_primitive_desc`: Output primitive descriptor.
+* `dst_desc`: Destination memory descriptor.
+* `n`: Number of source parameters.
+* `scales`: Vector of scales to multiply data in each source memory by.
+* `src_descs`: Array of source memory descriptors having `n` elements.
+* `attr`: Primitive attributes to use (can be NULL).
+* `engine`: Engine to use.
+# Returns
+#dnnl\\_success on success and a status describing the error otherwise.
+"""
+function dnnl_sum_primitive_desc_create(sum_primitive_desc, engine, dst_desc, n, scales, src_descs, attr)
+    @ccall libdnnl.dnnl_sum_primitive_desc_create(sum_primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, dst_desc::const_dnnl_memory_desc_t, n::Cint, scales::Ptr{Cfloat}, src_descs::Ptr{const_dnnl_memory_desc_t}, attr::const_dnnl_primitive_attr_t)::dnnl_status_t
+end
+
+"""
+    dnnl_binary_primitive_desc_create(primitive_desc, engine, alg_kind, src0_desc, src1_desc, dst_desc, attr)
+
+Creates a primitive descriptor for a binary primitive.
+
+!!! note
+
+    Memory descriptors `src1_desc` and `dst_desc` are allowed to be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any.
+
+!!! note
+
+    Both memory descriptors must have the same number of dimensions. Element broadcasting is supported for memory descriptor `src1_desc` and is applied to `src1_desc` dimensions that have size equal to 1.
+
+# Arguments
+* `primitive_desc`: Output primitive descriptor.
+* `engine`: Engine to use.
+* `alg_kind`: Algorithm kind. Valid values are #dnnl\\_binary\\_add, #dnnl\\_binary\\_mul, #dnnl\\_binary\\_max, #dnnl\\_binary\\_min, #dnnl\\_binary\\_div, #dnnl\\_binary\\_sub, #dnnl\\_binary\\_ge, #dnnl\\_binary\\_gt, #dnnl\\_binary\\_le, #dnnl\\_binary\\_lt, #dnnl\\_binary\\_eq and #dnnl\\_binary\\_ne.
+* `src0_desc`: Source 0 memory descriptor.
+* `src1_desc`: Source 1 memory descriptor.
+* `dst_desc`: Destination memory descriptor.
+* `attr`: Primitive attributes (can be NULL).
+# Returns
+#dnnl\\_success on success and a status describing the error otherwise.
+"""
+function dnnl_binary_primitive_desc_create(primitive_desc, engine, alg_kind, src0_desc, src1_desc, dst_desc, attr)
+    @ccall libdnnl.dnnl_binary_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, src0_desc::const_dnnl_memory_desc_t, src1_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t
+end
+
+"""
+    dnnl_convolution_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, weights_desc, bias_desc, dst_desc, strides, dilates, padding_l, padding_r, attr)
+
+Creates a primitive descriptor for a convolution forward propagation primitive.
+
+!!! note
+
+    Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any.
+
+Arrays `strides`, `dilates`, `padding_l`, and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width.
+ +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `alg_kind`: Convolution algorithm. Possible values are #dnnl\\_convolution\\_direct, #dnnl\\_convolution\\_winograd, #dnnl\\_convolution\\_auto. +* `src_desc`: Source memory descriptor. +* `weights_desc`: Weights memory descriptor. +* `bias_desc`: Bias memory descriptor. Passing NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef disables the bias term. +* `dst_desc`: Destination memory descriptor. +* `strides`: Array of strides for spatial dimension. +* `dilates`: Array of dilations for spatial dimension. A zero value means no dilation in the corresponding dimension. +* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. +* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. Can be NULL in which case padding is considered to be symmetrical. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_convolution_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, weights_desc, bias_desc, dst_desc, strides, dilates, padding_l, padding_r, attr) + @ccall libdnnl.dnnl_convolution_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, dilates::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_convolution_backward_data_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, weights_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + +Creates a primitive descriptor for a convolution backward propagation primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +Arrays `strides`, `dilates`, `padding_l`, and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: Convolution algorithm. Possible values are #dnnl\\_convolution\\_direct, #dnnl\\_convolution\\_winograd, #dnnl\\_convolution\\_auto. +* `diff_src_desc`: Diff source memory descriptor. +* `weights_desc`: Weights memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `strides`: Array of strides for spatial dimension. +* `dilates`: Array of dilations for spatial dimension. A zero value means no dilation in the corresponding dimension. 
+* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. +* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. Can be NULL in which case padding is considered to be symmetrical. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_convolution_backward_data_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, weights_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_convolution_backward_data_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, diff_src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, dilates::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_convolution_backward_weights_primitive_desc_create(primitive_desc, engine, alg_kind, src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + +Creates a primitive descriptor for a convolution weights gradient primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +Arrays `strides`, `dilates`, `padding_l`, and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width. + +# Arguments +* `primitive_desc`: Output primitive descriptor. 
+* `engine`: Engine to use. +* `alg_kind`: Convolution algorithm. Possible values are #dnnl\\_convolution\\_direct, #dnnl\\_convolution\\_winograd, #dnnl\\_convolution\\_auto. +* `src_desc`: Source memory descriptor. +* `diff_weights_desc`: Diff weights memory descriptor. +* `diff_bias_desc`: Diff bias memory descriptor. Passing NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef disables the bias term. +* `diff_dst_desc`: Diff destination memory descriptor. +* `strides`: Array of strides for spatial dimension. +* `dilates`: Array of dilations for spatial dimension. A zero value means no dilation in the corresponding dimension. +* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. +* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. Can be NULL in which case padding is considered to be symmetrical. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_convolution_backward_weights_primitive_desc_create(primitive_desc, engine, alg_kind, src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_convolution_backward_weights_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, diff_weights_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, dilates::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_deconvolution_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, weights_desc, bias_desc, dst_desc, strides, dilates, padding_l, padding_r, attr) + +Creates a primitive descriptor for a deconvolution forward propagation primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +Arrays `strides`, `dilates`, `padding_l`, and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `alg_kind`: Deconvolution algorithm. Possible values are #dnnl\\_deconvolution\\_direct, #dnnl\\_deconvolution\\_winograd. +* `src_desc`: Source memory descriptor. +* `weights_desc`: Weights memory descriptor. +* `bias_desc`: Bias memory descriptor. 
Passing NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef disables the bias term. +* `dst_desc`: Destination memory descriptor. +* `strides`: Array of strides for spatial dimension. +* `dilates`: Array of dilations for spatial dimension. A zero value means no dilation in the corresponding dimension. +* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. +* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. Can be NULL in which case padding is considered to be symmetrical. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_deconvolution_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, weights_desc, bias_desc, dst_desc, strides, dilates, padding_l, padding_r, attr) + @ccall libdnnl.dnnl_deconvolution_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, dilates::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_deconvolution_backward_data_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, weights_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + +Creates a primitive descriptor for a deconvolution backward propagation primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. 
+ +Arrays `strides`, `dilates`, `padding_l`, and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: Deconvolution algorithm. Possible values are #dnnl\\_deconvolution\\_direct, #dnnl\\_deconvolution\\_winograd. +* `diff_src_desc`: Diff source memory descriptor. +* `weights_desc`: Weights memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `strides`: Array of strides for spatial dimension. +* `dilates`: Array of dilations for spatial dimension. A zero value means no dilation in the corresponding dimension. +* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. +* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. Can be NULL in which case padding is considered to be symmetrical. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_deconvolution_backward_data_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, weights_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_deconvolution_backward_data_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, diff_src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, dilates::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_deconvolution_backward_weights_primitive_desc_create(primitive_desc, engine, alg_kind, src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + +Creates a primitive descriptor for a deconvolution weights gradient primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +Arrays `strides`, `dilates`, `padding_l`, and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: Deconvolution algorithm. Possible values are #dnnl\\_deconvolution\\_direct, #dnnl\\_deconvolution\\_winograd. +* `src_desc`: Source memory descriptor. +* `diff_weights_desc`: Diff weights memory descriptor. +* `diff_bias_desc`: Diff bias memory descriptor. Passing NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef disables the bias term. +* `diff_dst_desc`: Diff destination memory descriptor. 
+* `strides`: Array of strides for spatial dimension. +* `dilates`: Array of dilations for spatial dimension. A zero value means no dilation in the corresponding dimension. +* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. +* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. Can be NULL in which case padding is considered to be symmetrical. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_deconvolution_backward_weights_primitive_desc_create(primitive_desc, engine, alg_kind, src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc, strides, dilates, padding_l, padding_r, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_deconvolution_backward_weights_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, diff_weights_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, dilates::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_shuffle_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, axis, group_size, attr) + +Creates a primitive descriptor for a shuffle forward propagation primitive + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `axis`: The axis along which the data is shuffled. 
+* `group_size`: Shuffle group size. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_shuffle_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, axis, group_size, attr) + @ccall libdnnl.dnnl_shuffle_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, axis::Cint, group_size::dnnl_dim_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_shuffle_backward_primitive_desc_create(primitive_desc, engine, diff_src_desc, diff_dst_desc, axis, group_size, hint_fwd_pd, attr) + +Creates a primitive descriptor for a shuffle backward propagation primitive + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `axis`: The axis along which the data is shuffled. +* `group_size`: Shuffle group size. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_shuffle_backward_primitive_desc_create(primitive_desc, engine, diff_src_desc, diff_dst_desc, axis, group_size, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_shuffle_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, axis::Cint, group_size::dnnl_dim_t, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_eltwise_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, alpha, beta, attr) + +Creates a primitive descriptor for an eltwise forward propagation primitive. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `alg_kind`: Elementwise algorithm kind. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `alpha`: The alpha parameter for the elementwise operation. Specific meaning depends on the algorithm. +* `beta`: The beta parameter for the elementwise operation. Specific meaning depends on the algorithm. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_eltwise_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, alpha, beta, attr) + @ccall libdnnl.dnnl_eltwise_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, alpha::Cfloat, beta::Cfloat, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_eltwise_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, data_desc, alpha, beta, hint_fwd_pd, attr) + +Creates a primitive descriptor for an eltwise backward propagation primitive. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: Elementwise algorithm kind. +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `data_desc`: Destination memory descriptor if one of the "use\\_dst\\_for\\_bwd" algorithms are used (such as #dnnl\\_eltwise\\_relu\\_use\\_dst\\_for\\_bwd), source memory descriptor otherwise. +* `alpha`: The alpha parameter for the elementwise operation. Specific meaning depends on the algorithm. +* `beta`: The beta parameter for the elementwise operation. Specific meaning depends on the algorithm. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_eltwise_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, data_desc, alpha, beta, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_eltwise_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, data_desc::const_dnnl_memory_desc_t, alpha::Cfloat, beta::Cfloat, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_softmax_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, softmax_axis, attr) + +Creates a primitive descriptor for a softmax forward propagation primitive. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `alg_kind`: Softmax algorithm kind: either #dnnl\\_softmax\\_accurate, or #dnnl\\_softmax\\_log. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `softmax_axis`: Axis over which softmax is computed. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_softmax_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, softmax_axis, attr) + @ccall libdnnl.dnnl_softmax_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, softmax_axis::Cint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_softmax_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, dst_desc, softmax_axis, hint_fwd_pd, attr) + +Creates a primitive descriptor for a softmax backward propagation primitive. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: Softmax algorithm kind: either #dnnl\\_softmax\\_accurate, or #dnnl\\_softmax\\_log. +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `softmax_axis`: Axis over which softmax is computed. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_softmax_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, dst_desc, softmax_axis, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_softmax_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, softmax_axis::Cint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_pooling_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, strides, kernel, dilation, padding_l, padding_r, attr) + +Creates a primitive descriptor for a pooling forward propagation primitive. + +Arrays `strides`, `kernel`, `dilation`, `padding_l` and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `alg_kind`: Pooling algorithm kind: either #dnnl\\_pooling\\_max, #dnnl\\_pooling\\_avg\\_include\\_padding, or #dnnl\\_pooling\\_avg\\_exclude\\_padding. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `strides`: Array of strides for spatial dimension. +* `kernel`: Array of kernel spatial dimensions. +* `dilation`: Array of dilations for spatial dimension. +* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. +* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. 
Can be NULL in which case padding is considered to be symmetrical. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_pooling_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, strides, kernel, dilation, padding_l, padding_r, attr) + @ccall libdnnl.dnnl_pooling_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, kernel::Ptr{Clong}, dilation::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_pooling_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, strides, kernel, dilation, padding_l, padding_r, hint_fwd_pd, attr) + +Creates a primitive descriptor for a pooling backward propagation primitive. + +Arrays `strides`, `kernel`, `dilation`, `padding_l` and `padding_r` contain values for spatial dimensions only and hence must have the same number of elements as there are spatial dimensions. The order of values is the same as in the tensor: depth (for 3D tensors), height (for 3D and 2D tensors), and width. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: Pooling algorithm kind: either #dnnl\\_pooling\\_max, #dnnl\\_pooling\\_avg\\_include\\_padding, or #dnnl\\_pooling\\_avg\\_exclude\\_padding. +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `strides`: Array of strides for spatial dimension. +* `kernel`: Array of kernel spatial dimensions. +* `dilation`: Array of dilations for spatial dimension. +* `padding_l`: Array of padding values for low indices for each spatial dimension `([[front,] top,] left)`. 
+* `padding_r`: Array of padding values for high indices for each spatial dimension `([[back,] bottom,] right)`. Can be NULL in which case padding is considered to be symmetrical. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_pooling_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, strides, kernel, dilation, padding_l, padding_r, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_pooling_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, strides::Ptr{Clong}, kernel::Ptr{Clong}, dilation::Ptr{Clong}, padding_l::Ptr{Clong}, padding_r::Ptr{Clong}, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_prelu_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, weights_desc, dst_desc, attr) + +Creates a primitive descriptor for a PReLU (leaky ReLU with trainable alpha parameter) forward propagation primitive. + +!!! note + + weights descriptor is allowed to be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `src_desc`: Source memory descriptor. +* `weights_desc`: Alpha parameters memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_prelu_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, weights_desc, dst_desc, attr) + @ccall libdnnl.dnnl_prelu_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_prelu_backward_primitive_desc_create(primitive_desc, engine, src_desc, weights_desc, diff_src_desc, diff_weights_desc, diff_dst_desc, hint_fwd_pd, attr) + +Creates a primitive descriptor for a PReLU (leaky ReLU with trainable alpha parameter) backward propagation primitive. + +!!! note + + weights descriptor and diff\\_weights descriptor are allowed to be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `src_desc`: Source memory descriptor. +* `weights_desc`: Alpha parameters memory descriptor. +* `diff_src_desc`: Diff source memory descriptor. +* `diff_weights_desc`: Diff alpha parameters memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_prelu_backward_primitive_desc_create(primitive_desc, engine, src_desc, weights_desc, diff_src_desc, diff_weights_desc, diff_dst_desc, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_prelu_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, diff_src_desc::const_dnnl_memory_desc_t, diff_weights_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lrn_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, local_size, alpha, beta, k, attr) + +Creates a primitive descriptor for an LRN forward propagation primitive. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `alg_kind`: LRN algorithm kind: either #dnnl\\_lrn\\_across\\_channels or #dnnl\\_lrn\\_within\\_channel. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `local_size`: Regularization local size. +* `alpha`: The alpha regularization parameter. +* `beta`: The beta regularization parameter. +* `k`: The k regularization parameter. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_lrn_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, src_desc, dst_desc, local_size, alpha, beta, k, attr) + @ccall libdnnl.dnnl_lrn_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, local_size::dnnl_dim_t, alpha::Cfloat, beta::Cfloat, k::Cfloat, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lrn_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, src_desc, local_size, alpha, beta, k, hint_fwd_pd, attr) + +Creates a primitive descriptor for an LRN backward propagation primitive. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `alg_kind`: LRN algorithm kind: either #dnnl\\_lrn\\_across\\_channels or #dnnl\\_lrn\\_within\\_channel. +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `src_desc`: Source memory descriptor. +* `local_size`: Regularization local size. +* `alpha`: The alpha regularization parameter. +* `beta`: The beta regularization parameter. +* `k`: The k regularization parameter. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_lrn_backward_primitive_desc_create(primitive_desc, engine, alg_kind, diff_src_desc, diff_dst_desc, src_desc, local_size, alpha, beta, k, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_lrn_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, src_desc::const_dnnl_memory_desc_t, local_size::dnnl_dim_t, alpha::Cfloat, beta::Cfloat, k::Cfloat, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_batch_normalization_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, epsilon, flags, attr) + +Creates a primitive descriptor for a batch normalization forward propagation primitive. + +!!! note + + In-place operation is supported: the dst can refer to the same memory as the src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `epsilon`: Batch normalization epsilon parameter. +* `flags`: Batch normalization flags (dnnl_normalization_flags_t). +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_batch_normalization_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, epsilon, flags, attr) + @ccall libdnnl.dnnl_batch_normalization_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, epsilon::Cfloat, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_batch_normalization_backward_primitive_desc_create(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, epsilon, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for a batch normalization backward propagation primitive. + +!!! note + + In-place operation is supported: the diff\\_dst can refer to the same memory as the diff\\_src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_backward\\_data and #dnnl\\_backward (diffs for all parameters are computed in this case). +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `src_desc`: Source memory descriptor. +* `epsilon`: Batch normalization epsilon parameter. +* `flags`: Batch normalization flags (dnnl_normalization_flags_t). +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_batch_normalization_backward_primitive_desc_create(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, epsilon, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_batch_normalization_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, src_desc::const_dnnl_memory_desc_t, epsilon::Cfloat, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_group_normalization_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, groups, epsilon, flags, attr) + +Creates a primitive descriptor for a group normalization forward propagation primitive. + +!!! note + + In-place operation is supported: the dst can refer to the same memory as the src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `groups`: Group normalization groups parameter. +* `epsilon`: Group normalization epsilon parameter. +* `flags`: Group normalization flags (dnnl_normalization_flags_t). +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_group_normalization_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, groups, epsilon, flags, attr) + @ccall libdnnl.dnnl_group_normalization_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, groups::dnnl_dim_t, epsilon::Cfloat, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_group_normalization_backward_primitive_desc_create(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, groups, epsilon, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for a group normalization backward propagation primitive. + +!!! note + + In-place operation is supported: the diff\\_dst can refer to the same memory as the diff\\_src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_backward\\_data and #dnnl\\_backward (diffs for all parameters are computed in this case). +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `src_desc`: Source memory descriptor. +* `groups`: Group normalization groups parameter. +* `epsilon`: Group normalization epsilon parameter. +* `flags`: Group normalization flags (dnnl_normalization_flags_t). +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_group_normalization_backward_primitive_desc_create(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, groups, epsilon, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_group_normalization_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, src_desc::const_dnnl_memory_desc_t, groups::dnnl_dim_t, epsilon::Cfloat, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_layer_normalization_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, stat_desc, epsilon, flags, attr) + +Creates a primitive descriptor for a layer normalization forward propagation primitive. + +!!! note + + In-place operation is supported: the dst can refer to the same memory as the src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `stat_desc`: Memory descriptor for mean and variance. If this parameter is NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef, then the memory descriptor for stats is derived from `src_desc` by removing the last dimension. +* `epsilon`: Layer normalization epsilon parameter. +* `flags`: Layer normalization flags (dnnl_normalization_flags_t). +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_layer_normalization_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, dst_desc, stat_desc, epsilon, flags, attr) + @ccall libdnnl.dnnl_layer_normalization_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, stat_desc::const_dnnl_memory_desc_t, epsilon::Cfloat, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_layer_normalization_backward_primitive_desc_create(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, stat_desc, epsilon, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for a layer normalization backward propagation primitive. + +!!! note + + In-place operation is supported: the diff\\_dst can refer to the same memory as the diff\\_src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_backward\\_data and #dnnl\\_backward (diffs for all parameters are computed in this case). +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `src_desc`: Source memory descriptor. +* `stat_desc`: Memory descriptor for mean and variance. If this parameter is NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef, then the memory descriptor for stats is derived from `src_desc` by removing the last dimension. +* `epsilon`: Layer normalization epsilon parameter. +* `flags`: Layer normalization flags (dnnl_normalization_flags_t). +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_layer_normalization_backward_primitive_desc_create(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, stat_desc, epsilon, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_layer_normalization_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, src_desc::const_dnnl_memory_desc_t, stat_desc::const_dnnl_memory_desc_t, epsilon::Cfloat, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_layer_normalization_forward_primitive_desc_create_v2(primitive_desc, engine, prop_kind, src_desc, dst_desc, stat_desc, scale_shift_data_type, epsilon, flags, attr) + +Creates a primitive descriptor for a layer normalization forward propagation primitive with a user-provided data type for the scale and shift memory objects. + +!!! note + + In-place operation is supported: the dst can refer to the same memory as the src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `stat_desc`: Memory descriptor for mean and variance. If this parameter is NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef, then the memory descriptor for stats is derived from `src_desc` by removing the last dimension. +* `scale_shift_data_type`: Data type of scale and shift memory. If neither scale nor shift flag are specified the parameter is ignored. +* `epsilon`: Layer normalization epsilon parameter. +* `flags`: Layer normalization flags (dnnl_normalization_flags_t). +* `attr`: Primitive attributes (can be NULL). 
+# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_layer_normalization_forward_primitive_desc_create_v2(primitive_desc, engine, prop_kind, src_desc, dst_desc, stat_desc, scale_shift_data_type, epsilon, flags, attr) + @ccall libdnnl.dnnl_layer_normalization_forward_primitive_desc_create_v2(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, stat_desc::const_dnnl_memory_desc_t, scale_shift_data_type::dnnl_data_type_t, epsilon::Cfloat, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_layer_normalization_backward_primitive_desc_create_v2(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, stat_desc, diff_scale_shift_data_type, scale_shift_data_type, epsilon, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for a layer normalization backward propagation primitive with a user-provided data type for the scale and shift memory objects. + +!!! note + + In-place operation is supported: the diff\\_dst can refer to the same memory as the diff\\_src. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_backward\\_data and #dnnl\\_backward (diffs for all parameters are computed in this case). +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `src_desc`: Source memory descriptor. +* `stat_desc`: Memory descriptor for mean and variance. If this parameter is NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef, then the memory descriptor for stats is derived from `src_desc` by removing the last dimension. +* `diff_scale_shift_data_type`: Data type of diff scale and shift memory. 
If neither scale nor shift flag are specified the parameter is ignored. +* `scale_shift_data_type`: Data type of scale and shift memory. If neither scale nor shift flag are specified the parameter is ignored. +* `epsilon`: Layer normalization epsilon parameter. +* `flags`: Layer normalization flags (dnnl_normalization_flags_t). +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_layer_normalization_backward_primitive_desc_create_v2(primitive_desc, engine, prop_kind, diff_src_desc, diff_dst_desc, src_desc, stat_desc, diff_scale_shift_data_type, scale_shift_data_type, epsilon, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_layer_normalization_backward_primitive_desc_create_v2(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, src_desc::const_dnnl_memory_desc_t, stat_desc::const_dnnl_memory_desc_t, diff_scale_shift_data_type::dnnl_data_type_t, scale_shift_data_type::dnnl_data_type_t, epsilon::Cfloat, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_inner_product_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, weights_desc, bias_desc, dst_desc, attr) + +Creates a primitive descriptor for an inner product forward propagation primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `src_desc`: Source memory descriptor. 
+* `weights_desc`: Weights memory descriptor. +* `bias_desc`: Bias memory descriptor. Passing NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef disables the bias term. +* `dst_desc`: Destination memory descriptor. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_inner_product_forward_primitive_desc_create(primitive_desc, engine, prop_kind, src_desc, weights_desc, bias_desc, dst_desc, attr) + @ccall libdnnl.dnnl_inner_product_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_inner_product_backward_data_primitive_desc_create(primitive_desc, engine, diff_src_desc, weights_desc, diff_dst_desc, hint_fwd_pd, attr) + +Creates a primitive descriptor for an inner product backward propagation primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `diff_src_desc`: Diff source memory descriptor. +* `weights_desc`: Weights memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_inner_product_backward_data_primitive_desc_create(primitive_desc, engine, diff_src_desc, weights_desc, diff_dst_desc, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_inner_product_backward_data_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, diff_src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_inner_product_backward_weights_primitive_desc_create(primitive_desc, engine, src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc, hint_fwd_pd, attr) + +Creates a primitive descriptor for an inner product weights gradient primitive. + +!!! note + + Memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive\\_descriptor. +* `engine`: Engine to use. +* `src_desc`: Source memory descriptor. +* `diff_weights_desc`: Diff weights memory descriptor. +* `diff_bias_desc`: Diff bias memory descriptor. Passing NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef disables the bias term. +* `diff_dst_desc`: Diff destination memory descriptor. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_inner_product_backward_weights_primitive_desc_create(primitive_desc, engine, src_desc, diff_weights_desc, diff_bias_desc, diff_dst_desc, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_inner_product_backward_weights_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, src_desc::const_dnnl_memory_desc_t, diff_weights_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_rnn_data_qparams(attr, scale, shift) + +Set quantization scale and shift parameters for RNN data tensors. + +For performance reasons, the low-precision configuration of the RNN primitives expects input activations to have the unsigned 8-bit integer data type. The scale and shift parameters are used to quantize floating-point data to unsigned integer and must be passed to the RNN primitive using attributes. + +The quantization formula is `scale * data + shift`. + +!!! note + + Quantization scale and shift are common for src\\_layer, src\\_iter, dst\\_iter, and dst\\_layer. + +Example usage: + +```c++ + // RNN parameters + int l = 2, t = 2, mb = 32, sic = 32, slc = 32, dic = 32, dlc = 32; + // Activations quantization parameters + float scale = 63.f, shift = 64.f; + + dnnl_primitive_attr_t rnn_attr; + // Create default attributes + dnnl_primitive_attr_create(&rnn_attr); + + // Set scale and shift for int8 quantization of activation + dnnl_primitive_attr_set_rnn_data_qparams(rnn_attr, scale, shift); + + // Create an RNN primitive descriptor. + dnnl_primitive_desc_t rnn_pd; + dnnl_vanilla_rnn_forward_primitive_desc_create(&rnn_pd, + engine, /* arguments */, attr); +``` + +# Arguments +* `attr`: Primitive attributes. +* `scale`: The value to scale the data by. +* `shift`: The value to shift the data by. 
+# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_rnn_data_qparams(attr, scale, shift) + @ccall libdnnl.dnnl_primitive_attr_set_rnn_data_qparams(attr::dnnl_primitive_attr_t, scale::Cfloat, shift::Cfloat)::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_rnn_data_qparams(attr, scale, shift) + +Returns the quantization scale and shift parameters for RNN data tensors. + +!!! note + + Quantization scale and shift are common for src\\_layer, src\\_iter, dst\\_iter, and dst\\_layer. + +# Arguments +* `attr`: Primitive attributes. +* `scale`: The value to scale the data by. +* `shift`: The value to shift the data by. +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_get_rnn_data_qparams(attr, scale, shift) + @ccall libdnnl.dnnl_primitive_attr_get_rnn_data_qparams(attr::const_dnnl_primitive_attr_t, scale::Ptr{Cfloat}, shift::Ptr{Cfloat})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_rnn_weights_qparams(attr, count, mask, scales) + +Sets quantization scaling factors for RNN weights tensors. The low-precision configuration of the RNN primitives expects input weights to use the signed 8-bit integer data type. The scaling factors are used to quantize floating-point data to signed integer and must be passed to RNN primitives using attributes. + +!!! note + + The dimension order is always native and does not depend on the actual layout used. For example, five-dimensional weights always have (l, d, i, g, o) logical dimension ordering. + +!!! note + + Quantization scales are common for weights\\_layer and weights\\_iteration + +```c++ +count = \\prod\\limits_{d \\in mask} weights.dims[d]. +``` + +Violations can only be detected when the attributes are used to create a primitive descriptor. + +# Arguments +* `attr`: Primitive attributes. +* `count`: Number of elements in the `scales` array. 
+* `mask`: Scaling factors correspondence mask that defines the correspondence between the output tensor dimensions and the `scales` vector. The set i-th bit indicates that a dedicated scaling factor should be used for each index along that dimension. Set the mask to 0 to use a common scaling factor for the whole output tensor. +* `scales`: Array of output scaling factors that must contain `count` values and the following equality must hold: +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_set_rnn_weights_qparams(attr, count, mask, scales) + @ccall libdnnl.dnnl_primitive_attr_set_rnn_weights_qparams(attr::dnnl_primitive_attr_t, count::dnnl_dim_t, mask::Cint, scales::Ptr{Cfloat})::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_rnn_weights_qparams(attr, count, mask, scales) + +Returns the quantization scaling factors for RNN weights tensors. + +```c++ +count = \\prod\\limits_{d \\in mask} weights.dims[d]. +``` + +# Arguments +* `attr`: Primitive attributes. +* `count`: Number of elements in the `scales` array. +* `mask`: Scaling factors correspondence mask that defines the correspondence between the output tensor dimensions and the `scales` vector. The set i-th bit indicates that a dedicated scaling factor should be used for each index along that dimension. Set the mask to 0 to use a common scaling factor for the whole output tensor. +* `scales`: Array of output scaling factors that contain `count` values and the following equality must hold: +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_primitive_attr_get_rnn_weights_qparams(attr, count, mask, scales) + @ccall libdnnl.dnnl_primitive_attr_get_rnn_weights_qparams(attr::const_dnnl_primitive_attr_t, count::Ptr{dnnl_dim_t}, mask::Ptr{Cint}, scales::Ptr{Ptr{Cfloat}})::dnnl_status_t +end + +""" + dnnl_primitive_attr_set_rnn_weights_projection_qparams(attr, count, mask, scales) + +Sets quantization scaling factors for RNN projection weights tensors. The low-precision configuration of the RNN primitives expects input weights to use the signed 8-bit integer data type. The scaling factors are used to quantize floating-point data to signed integer and must be passed to RNN primitives using attributes. + +!!! note + + The dimension order is always native and does not depend on the actual layout used. For example, five-dimensional weights always have (l, d, i, g, o) logical dimension ordering. + +```c++ +count = \\prod\\limits_{d \\in mask} weights.dims[d]. +``` + +Violations can only be detected when the attributes are used to create a primitive descriptor. + +# Arguments +* `attr`: Primitive attributes. +* `count`: Number of elements in the `scales` array. +* `mask`: Scaling factors correspondence mask that defines the correspondence between the output tensor dimensions and the `scales` vector. The set i-th bit indicates that a dedicated scaling factor should be used for each index along that dimension. Set the mask to 0 to use a common scaling factor for the whole output tensor. +* `scales`: Array of output scaling factors that must contain `count` values and the following equality must hold: +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_primitive_attr_set_rnn_weights_projection_qparams(attr, count, mask, scales) + @ccall libdnnl.dnnl_primitive_attr_set_rnn_weights_projection_qparams(attr::dnnl_primitive_attr_t, count::dnnl_dim_t, mask::Cint, scales::Ptr{Cfloat})::dnnl_status_t +end + +""" + dnnl_primitive_attr_get_rnn_weights_projection_qparams(attr, count, mask, scales) + +Returns the quantization scaling factors for RNN projection weights tensors. + +```c++ +count = \\prod\\limits_{d \\in mask} weights.dims[d]. +``` + +# Arguments +* `attr`: Primitive attributes. +* `count`: Number of elements in the `scales` array. +* `mask`: Scaling factors correspondence mask that defines the correspondence between the output tensor dimensions and the `scales` vector. The set i-th bit indicates that a dedicated scaling factor should be used for each index along that dimension. Set the mask to 0 to use a common scaling factor for the whole output tensor. +* `scales`: Array of output scaling factors that contain `count` values and the following equality must hold: +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_primitive_attr_get_rnn_weights_projection_qparams(attr, count, mask, scales) + @ccall libdnnl.dnnl_primitive_attr_get_rnn_weights_projection_qparams(attr::const_dnnl_primitive_attr_t, count::Ptr{dnnl_dim_t}, mask::Ptr{Cint}, scales::Ptr{Ptr{Cfloat}})::dnnl_status_t +end + +""" + dnnl_vanilla_rnn_forward_primitive_desc_create(primitive_desc, engine, prop_kind, activation, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, alpha, beta, attr) + +Creates a primitive descriptor for vanilla RNN forward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc`, - `bias_desc`, - `dst_iter_desc`. 
+ +This would then indicate that the RNN forward propagation primitive should not use them and should default to zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `activation`: Activation kind. Possible values are #dnnl\\_eltwise\\_relu, #dnnl\\_eltwise\\_tanh or #dnnl\\_eltwise\\_logistic. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `flags`: Unused. +* `alpha`: Negative slope if activation is #dnnl\\_eltwise\\_relu. +* `beta`: Unused. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_vanilla_rnn_forward_primitive_desc_create(primitive_desc, engine, prop_kind, activation, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, alpha, beta, attr) + @ccall libdnnl.dnnl_vanilla_rnn_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, activation::dnnl_alg_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, alpha::Cfloat, beta::Cfloat, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_vanilla_rnn_backward_primitive_desc_create(primitive_desc, engine, prop_kind, activation, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, alpha, beta, hint_fwd_pd, attr) + +Creates a primitive descriptor for vanilla RNN backward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc` together with `diff_src_iter_desc`, - `bias_desc` together with `diff_bias_desc`, - `dst_iter_desc` together with `diff_dst_iter_desc`. + +This would then indicate that the RNN backward propagation primitive should not use the respective data and should use zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. 
+* `prop_kind`: Propagation kind. Must be #dnnl\\_backward. +* `activation`: Activation kind. Possible values are #dnnl\\_eltwise\\_relu, #dnnl\\_eltwise\\_tanh or #dnnl\\_eltwise\\_logistic. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `diff_src_layer_desc`: Memory descriptor for the diff of input vector. +* `diff_src_iter_desc`: Memory descriptor for the diff of input recurrent hidden state vector. +* `diff_weights_layer_desc`: Memory descriptor for the diff of weights applied to the layer input. +* `diff_weights_iter_desc`: Memory descriptor for the diff of weights applied to the recurrent input. +* `diff_bias_desc`: Diff bias memory descriptor. +* `diff_dst_layer_desc`: Memory descriptor for the diff of output vector. +* `diff_dst_iter_desc`: Memory descriptor for the diff of output recurrent hidden state vector. +* `flags`: Unused. +* `alpha`: Negative slope if activation is #dnnl\\_eltwise\\_relu. +* `beta`: Unused. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_vanilla_rnn_backward_primitive_desc_create(primitive_desc, engine, prop_kind, activation, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, alpha, beta, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_vanilla_rnn_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, activation::dnnl_alg_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, diff_src_layer_desc::const_dnnl_memory_desc_t, diff_src_iter_desc::const_dnnl_memory_desc_t, diff_weights_layer_desc::const_dnnl_memory_desc_t, diff_weights_iter_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_layer_desc::const_dnnl_memory_desc_t, diff_dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, alpha::Cfloat, beta::Cfloat, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lstm_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, src_iter_c_desc, weights_layer_desc, weights_iter_desc, weights_peephole_desc, weights_projection_desc, bias_desc, dst_layer_desc, dst_iter_desc, dst_iter_c_desc, flags, attr) + +Creates a primitive descriptor for an LSTM forward propagation primitive. 
+ +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc` together with `src_iter_c_desc`, - `weights_peephole_desc`, - `bias_desc`, - `dst_iter_desc` together with `dst_iter_c_desc`. + +This would then indicate that the LSTM forward propagation primitive should not use them and should default to zero values instead. + +The `weights_projection_desc` could either be `NULL` or point to a zero memory descriptor. This would then indicate that the LSTM doesn't have recurrent projection layer. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `src_iter_c_desc`: Memory descriptor for the input recurrent cell state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `weights_peephole_desc`: Memory descriptor for the weights applied to the cell states (according to the Peephole LSTM formula). +* `weights_projection_desc`: Memory descriptor for the weights applied to the hidden states to get the recurrent projection (according to the Projection LSTM formula). +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `dst_iter_c_desc`: Memory descriptor for the output recurrent cell state vector. +* `flags`: Unused. 
+* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_lstm_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, src_iter_c_desc, weights_layer_desc, weights_iter_desc, weights_peephole_desc, weights_projection_desc, bias_desc, dst_layer_desc, dst_iter_desc, dst_iter_c_desc, flags, attr) + @ccall libdnnl.dnnl_lstm_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, src_iter_c_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, weights_peephole_desc::const_dnnl_memory_desc_t, weights_projection_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, dst_iter_c_desc::const_dnnl_memory_desc_t, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lstm_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, src_iter_c_desc, weights_layer_desc, weights_iter_desc, weights_peephole_desc, weights_projection_desc, bias_desc, dst_layer_desc, dst_iter_desc, dst_iter_c_desc, diff_src_layer_desc, diff_src_iter_desc, diff_src_iter_c_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_weights_peephole_desc, diff_weights_projection_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, diff_dst_iter_c_desc, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for an LSTM backward propagation primitive. 
+ +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc` together with `src_iter_c_desc`, `diff_src_iter_desc`, and `diff_src_iter_c_desc`, - `weights_peephole_desc` together with `diff_weights_peephole_desc`, - `bias_desc` together with `diff_bias_desc`, - `dst_iter_desc` together with `dst_iter_c_desc`, `diff_dst_iter_desc`, and `diff_dst_iter_c_desc`. + +This would then indicate that the LSTM backward propagation primitive should not use them and should default to zero values instead. + +The `weights_projection_desc` together with `diff_weights_projection_desc` could either be `NULL` or point to a zero memory descriptor. This would then indicate that the LSTM doesn't have recurrent projection layer. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Must be #dnnl\\_backward. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `src_iter_c_desc`: Memory descriptor for the input recurrent cell state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `weights_peephole_desc`: Memory descriptor for the weights applied to the cell states (according to the Peephole LSTM formula). +* `weights_projection_desc`: Memory descriptor for the weights applied to the hidden states to get the recurrent projection (according to the Projection LSTM formula). +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. 
+* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `dst_iter_c_desc`: Memory descriptor for the output recurrent cell state vector. +* `diff_src_layer_desc`: Memory descriptor for the diff of input vector. +* `diff_src_iter_desc`: Memory descriptor for the diff of input recurrent hidden state vector. +* `diff_src_iter_c_desc`: Memory descriptor for the diff of input recurrent cell state vector. +* `diff_weights_layer_desc`: Memory descriptor for the diff of weights applied to the layer input. +* `diff_weights_iter_desc`: Memory descriptor for the diff of weights applied to the recurrent input. +* `diff_weights_peephole_desc`: Memory descriptor for the diff of weights applied to the cell states (according to the Peephole LSTM formula). +* `diff_weights_projection_desc`: Memory descriptor for the diff of weights applied to the hidden states to get the recurrent projection (according to the Projection LSTM formula). +* `diff_bias_desc`: Diff bias memory descriptor. +* `diff_dst_layer_desc`: Memory descriptor for the diff of output vector. +* `diff_dst_iter_desc`: Memory descriptor for the diff of output recurrent hidden state vector. +* `diff_dst_iter_c_desc`: Memory descriptor for the diff of output recurrent cell state vector. +* `flags`: Unused. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_lstm_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, src_iter_c_desc, weights_layer_desc, weights_iter_desc, weights_peephole_desc, weights_projection_desc, bias_desc, dst_layer_desc, dst_iter_desc, dst_iter_c_desc, diff_src_layer_desc, diff_src_iter_desc, diff_src_iter_c_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_weights_peephole_desc, diff_weights_projection_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, diff_dst_iter_c_desc, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_lstm_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, src_iter_c_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, weights_peephole_desc::const_dnnl_memory_desc_t, weights_projection_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, dst_iter_c_desc::const_dnnl_memory_desc_t, diff_src_layer_desc::const_dnnl_memory_desc_t, diff_src_iter_desc::const_dnnl_memory_desc_t, diff_src_iter_c_desc::const_dnnl_memory_desc_t, diff_weights_layer_desc::const_dnnl_memory_desc_t, diff_weights_iter_desc::const_dnnl_memory_desc_t, diff_weights_peephole_desc::const_dnnl_memory_desc_t, diff_weights_projection_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_layer_desc::const_dnnl_memory_desc_t, diff_dst_iter_desc::const_dnnl_memory_desc_t, diff_dst_iter_c_desc::const_dnnl_memory_desc_t, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_gru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, 
src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + +Creates a primitive descriptor for GRU forward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc`, - `bias_desc`, - `dst_iter_desc`. + +This would then indicate that the GRU forward propagation primitive should not use them and should default to zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `flags`: Unused. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_gru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + @ccall libdnnl.dnnl_gru_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_gru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for GRU backward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc` together with `diff_src_iter_desc`, - `bias_desc` together with `diff_bias_desc`, - `dst_iter_desc` together with `diff_dst_iter_desc`. + +This would then indicate that the GRU backward propagation primitive should not use them and should default to zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Must be #dnnl\\_backward. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. 
+* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `diff_src_layer_desc`: Memory descriptor for the diff of input vector. +* `diff_src_iter_desc`: Memory descriptor for the diff of input recurrent hidden state vector. +* `diff_weights_layer_desc`: Memory descriptor for the diff of weights applied to the layer input. +* `diff_weights_iter_desc`: Memory descriptor for the diff of weights applied to the recurrent input. +* `diff_bias_desc`: Diff bias memory descriptor. +* `diff_dst_layer_desc`: Memory descriptor for the diff of output vector. +* `diff_dst_iter_desc`: Memory descriptor for the diff of output recurrent hidden state vector. +* `flags`: Unused. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_gru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_gru_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, diff_src_layer_desc::const_dnnl_memory_desc_t, diff_src_iter_desc::const_dnnl_memory_desc_t, diff_weights_layer_desc::const_dnnl_memory_desc_t, diff_weights_iter_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_layer_desc::const_dnnl_memory_desc_t, diff_dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lbr_gru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + +Creates a descriptor for LBR GRU forward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc`, - `bias_desc`, - `dst_iter_desc`. + +This would then indicate that the LBR GRU forward propagation primitive should not use them and should default to zero values instead. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. 
Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `flags`: Unused. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_lbr_gru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + @ccall libdnnl.dnnl_lbr_gru_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lbr_gru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + +Creates a primitive 
descriptor for LBR GRU backward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc` together with `diff_src_iter_desc`, - `bias_desc` together with `diff_bias_desc`, - `dst_iter_desc` together with `diff_dst_iter_desc`. + +This would then indicate that the LBR GRU backward propagation primitive should not use them and should default to zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Must be #dnnl\\_backward. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `diff_src_layer_desc`: Memory descriptor for the diff of input vector. +* `diff_src_iter_desc`: Memory descriptor for the diff of input recurrent hidden state vector. +* `diff_weights_layer_desc`: Memory descriptor for the diff of weights applied to the layer input. +* `diff_weights_iter_desc`: Memory descriptor for the diff of weights applied to the recurrent input. +* `diff_bias_desc`: Diff bias memory descriptor. +* `diff_dst_layer_desc`: Memory descriptor for the diff of output vector. +* `diff_dst_iter_desc`: Memory descriptor for the diff of output recurrent hidden state vector. +* `flags`: Unused. 
+* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_lbr_gru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_lbr_gru_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, diff_src_layer_desc::const_dnnl_memory_desc_t, diff_src_iter_desc::const_dnnl_memory_desc_t, diff_weights_layer_desc::const_dnnl_memory_desc_t, diff_weights_iter_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_layer_desc::const_dnnl_memory_desc_t, diff_dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_augru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + +Creates a primitive descriptor for AUGRU forward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc`, - `bias_desc`, - `dst_iter_desc`. 
+ +This would then indicate that the AUGRU forward propagation primitive should not use them and should default to zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `attention_desc`: Memory descriptor for the attention vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `flags`: Unused. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_augru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + @ccall libdnnl.dnnl_augru_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, attention_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_augru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_attention_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for AUGRU backward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc` together with `diff_src_iter_desc`, - `bias_desc` together with `diff_bias_desc`, - `dst_iter_desc` together with `diff_dst_iter_desc`. + +This would then indicate that the AUGRU backward propagation primitive should not use them and should default to zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. 
Must be #dnnl\\_backward. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `attention_desc`: Memory descriptor for the attention vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `diff_src_layer_desc`: Memory descriptor for the diff of input vector. +* `diff_src_iter_desc`: Memory descriptor for the diff of input recurrent hidden state vector. +* `diff_attention_desc`: Memory descriptor for the diff of attention vector. +* `diff_weights_layer_desc`: Memory descriptor for the diff of weights applied to the layer input. +* `diff_weights_iter_desc`: Memory descriptor for the diff of weights applied to the recurrent input. +* `diff_bias_desc`: Diff bias memory descriptor. +* `diff_dst_layer_desc`: Memory descriptor for the diff of output vector. +* `diff_dst_iter_desc`: Memory descriptor for the diff of output recurrent hidden state vector. +* `flags`: Unused. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_augru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_attention_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_augru_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, attention_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, diff_src_layer_desc::const_dnnl_memory_desc_t, diff_src_iter_desc::const_dnnl_memory_desc_t, diff_attention_desc::const_dnnl_memory_desc_t, diff_weights_layer_desc::const_dnnl_memory_desc_t, diff_weights_iter_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_layer_desc::const_dnnl_memory_desc_t, diff_dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lbr_augru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + +Creates a primitive descriptor for LBR AUGRU forward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc`, - `bias_desc`, - `dst_iter_desc`. 
+ +This would then indicate that the LBR AUGRU forward propagation primitive should not use them and should default to zero values instead. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `attention_desc`: Memory descriptor for the attention vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `flags`: Unused. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_lbr_augru_forward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, flags, attr) + @ccall libdnnl.dnnl_lbr_augru_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, attention_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_lbr_augru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_attention_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + +Creates a primitive descriptor for LBR AUGRU backward propagation primitive. + +The following arguments may either be `NULL` or point to a zero memory descriptor: - `src_iter_desc` together with `diff_src_iter_desc`, - `bias_desc` together with `diff_bias_desc`, - `dst_iter_desc` together with `diff_dst_iter_desc`. + +This would then indicate that the LBR AUGRU backward propagation primitive should not use them and should default to zero values instead. + +!!! note + + All memory descriptors can be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. 
+* `prop_kind`: Propagation kind. Must be #dnnl\\_backward. +* `direction`: RNN direction. See dnnl_rnn_direction_t for more info. +* `src_layer_desc`: Memory descriptor for the input vector. +* `src_iter_desc`: Memory descriptor for the input recurrent hidden state vector. +* `attention_desc`: Memory descriptor for the attention vector. +* `weights_layer_desc`: Memory descriptor for the weights applied to the layer input. +* `weights_iter_desc`: Memory descriptor for the weights applied to the recurrent input. +* `bias_desc`: Bias memory descriptor. +* `dst_layer_desc`: Memory descriptor for the output vector. +* `dst_iter_desc`: Memory descriptor for the output recurrent hidden state vector. +* `diff_src_layer_desc`: Memory descriptor for the diff of input vector. +* `diff_src_iter_desc`: Memory descriptor for the diff of input recurrent hidden state vector. +* `diff_attention_desc`: Memory descriptor for the diff of attention vector. +* `diff_weights_layer_desc`: Memory descriptor for the diff of weights applied to the layer input. +* `diff_weights_iter_desc`: Memory descriptor for the diff of weights applied to the recurrent input. +* `diff_bias_desc`: Diff bias memory descriptor. +* `diff_dst_layer_desc`: Memory descriptor for the diff of output vector. +* `diff_dst_iter_desc`: Memory descriptor for the diff of output recurrent hidden state vector. +* `flags`: Unused. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_lbr_augru_backward_primitive_desc_create(primitive_desc, engine, prop_kind, direction, src_layer_desc, src_iter_desc, attention_desc, weights_layer_desc, weights_iter_desc, bias_desc, dst_layer_desc, dst_iter_desc, diff_src_layer_desc, diff_src_iter_desc, diff_attention_desc, diff_weights_layer_desc, diff_weights_iter_desc, diff_bias_desc, diff_dst_layer_desc, diff_dst_iter_desc, flags, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_lbr_augru_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, direction::dnnl_rnn_direction_t, src_layer_desc::const_dnnl_memory_desc_t, src_iter_desc::const_dnnl_memory_desc_t, attention_desc::const_dnnl_memory_desc_t, weights_layer_desc::const_dnnl_memory_desc_t, weights_iter_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_layer_desc::const_dnnl_memory_desc_t, dst_iter_desc::const_dnnl_memory_desc_t, diff_src_layer_desc::const_dnnl_memory_desc_t, diff_src_iter_desc::const_dnnl_memory_desc_t, diff_attention_desc::const_dnnl_memory_desc_t, diff_weights_layer_desc::const_dnnl_memory_desc_t, diff_weights_iter_desc::const_dnnl_memory_desc_t, diff_bias_desc::const_dnnl_memory_desc_t, diff_dst_layer_desc::const_dnnl_memory_desc_t, diff_dst_iter_desc::const_dnnl_memory_desc_t, flags::Cuint, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_matmul_primitive_desc_create(primitive_desc, engine, src_desc, weights_desc, bias_desc, dst_desc, attr) + +Creates a primitive descriptor for a matrix multiplication primitive. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `src_desc`: Source memory descriptor (matrix A) +* `weights_desc`: Weights memory descriptor (matrix B) +* `bias_desc`: Bias memory descriptor. 
Passing NULL, a zero memory descriptor, or a memory descriptor with format\\_kind set to #dnnl\\_format\\_kind\\_undef disables the bias term. +* `dst_desc`: Destination memory descriptor (matrix C). +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_matmul_primitive_desc_create(primitive_desc, engine, src_desc, weights_desc, bias_desc, dst_desc, attr) + @ccall libdnnl.dnnl_matmul_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, src_desc::const_dnnl_memory_desc_t, weights_desc::const_dnnl_memory_desc_t, bias_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_resampling_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, factors, src_desc, dst_desc, attr) + +Creates a primitive descriptor for a resampling forward propagation primitive. + +!!! note + + Destination memory descriptor is allowed to be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `prop_kind`: Propagation kind. Possible values are #dnnl\\_forward\\_training and #dnnl\\_forward\\_inference. +* `alg_kind`: resampling algorithm kind: either #dnnl\\_resampling\\_nearest, or #dnnl\\_resampling\\_linear. +* `factors`: Array of scaling factors for spatial dimension. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. 
+""" +function dnnl_resampling_forward_primitive_desc_create(primitive_desc, engine, prop_kind, alg_kind, factors, src_desc, dst_desc, attr) + @ccall libdnnl.dnnl_resampling_forward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, prop_kind::dnnl_prop_kind_t, alg_kind::dnnl_alg_kind_t, factors::Ptr{Cfloat}, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_resampling_backward_primitive_desc_create(primitive_desc, engine, alg_kind, factors, diff_src_desc, diff_dst_desc, hint_fwd_pd, attr) + +Creates a primitive descriptor for a resampling backward propagation primitive. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: resamplinging algorithm kind: either #dnnl\\_resampling\\_nearest, or #dnnl\\_resampling\\_linear. +* `diff_src_desc`: Diff source memory descriptor. +* `diff_dst_desc`: Diff destination memory descriptor. +* `factors`: Array of scaling factors for spatial dimension. +* `hint_fwd_pd`: Primitive descriptor for a respective forward propagation primitive. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_resampling_backward_primitive_desc_create(primitive_desc, engine, alg_kind, factors, diff_src_desc, diff_dst_desc, hint_fwd_pd, attr) + @ccall libdnnl.dnnl_resampling_backward_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, factors::Ptr{Cfloat}, diff_src_desc::const_dnnl_memory_desc_t, diff_dst_desc::const_dnnl_memory_desc_t, hint_fwd_pd::const_dnnl_primitive_desc_t, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_reduction_primitive_desc_create(primitive_desc, engine, alg_kind, src_desc, dst_desc, p, eps, attr) + +Creates a primitive descriptor for a reduction primitive. + +!!! 
note + + Destination memory descriptor is allowed to be initialized with #dnnl\\_format\\_tag\\_any or with format\\_kind set to #dnnl\\_format\\_kind\\_any. + +# Arguments +* `primitive_desc`: Output primitive descriptor. +* `engine`: Engine to use. +* `alg_kind`: reduction algorithm kind. Possible values: #dnnl\\_reduction\\_max, #dnnl\\_reduction\\_min, #dnnl\\_reduction\\_sum, #dnnl\\_reduction\\_mul, #dnnl\\_reduction\\_mean, #dnnl\\_reduction\\_norm\\_lp\\_max, #dnnl\\_reduction\\_norm\\_lp\\_sum, #dnnl\\_reduction\\_norm\\_lp\\_power\\_p\\_max, #dnnl\\_reduction\\_norm\\_lp\\_power\\_p\\_sum. +* `p`: Algorithm specific parameter. +* `eps`: Algorithm specific parameter. +* `src_desc`: Source memory descriptor. +* `dst_desc`: Destination memory descriptor. +* `attr`: Primitive attributes (can be NULL). +# Returns +#dnnl\\_success on success and a status describing the error otherwise. +""" +function dnnl_reduction_primitive_desc_create(primitive_desc, engine, alg_kind, src_desc, dst_desc, p, eps, attr) + @ccall libdnnl.dnnl_reduction_primitive_desc_create(primitive_desc::Ptr{dnnl_primitive_desc_t}, engine::dnnl_engine_t, alg_kind::dnnl_alg_kind_t, src_desc::const_dnnl_memory_desc_t, dst_desc::const_dnnl_memory_desc_t, p::Cfloat, eps::Cfloat, attr::const_dnnl_primitive_attr_t)::dnnl_status_t +end + +""" + dnnl_get_primitive_cache_capacity(capacity) + +Returns the number of primitives that can be held in the primitive cache at the same time. + +# Arguments +* `capacity`: Primitive cache capacity to query. Concurrently accessing `capacity` is safe. +# Returns +#dnnl\\_invalid\\_arguments/#dnnl::status::invalid\\_arguments if the `capacity` value is invalid, and #dnnl\\_success/#dnnl::status::success on success. 
+""" +function dnnl_get_primitive_cache_capacity(capacity) + @ccall libdnnl.dnnl_get_primitive_cache_capacity(capacity::Ptr{Cint})::dnnl_status_t +end + +""" + dnnl_set_primitive_cache_capacity(capacity) + +Sets a number of primitives that can be held in the primitive cache at a time. + +# Arguments +* `capacity`: Primitive cache capacity to set. If a new `capacity` is less than a number of primitives that the primitive cache already has then the excess entries will be evicted. Setting the `capacity` to 0 clears the primitive cache and disables it. Concurrently modifying `capacity` is safe. +# Returns +#dnnl\\_invalid\\_arguments/#dnnl::status::invalid\\_arguments if the `capacity` value is invalid, and #dnnl\\_success/#dnnl::status::success on success. +""" +function dnnl_set_primitive_cache_capacity(capacity) + @ccall libdnnl.dnnl_set_primitive_cache_capacity(capacity::Cint)::dnnl_status_t +end + +""" + dnnl_set_jit_dump(enable) + +Configures dumping of JIT-generated code. + +!!! note + + This setting overrides the DNNL\\_JIT\\_DUMP environment variable. + +# Arguments +* `enable`: Flag value. Set to 0 to disable and set to 1 to enable. +# Returns +#dnnl\\_invalid\\_arguments/#dnnl::status::invalid\\_arguments if the `flag` value is invalid, and #dnnl\\_success/#dnnl::status::success on success. +""" +function dnnl_set_jit_dump(enable) + @ccall libdnnl.dnnl_set_jit_dump(enable::Cint)::dnnl_status_t +end + +""" + dnnl_set_jit_profiling_flags(flags) + +Sets library profiling flags. The flags define which profilers are supported. + +!!! note + + This setting overrides DNNL\\_JIT\\_PROFILE environment variable. + +Passing DNNL_JIT_PROFILE_NONE disables profiling completely. + +# Arguments +* `flags`: Profiling flags that can contain the following bits: - DNNL_JIT_PROFILE_VTUNE -- integration with VTune Profiler (on by default) - DNNL_JIT_PROFILE_LINUX_JITDUMP -- produce Linux-specific jit-pid.dump output (off by default). 
The location of the output is controlled via JITDUMPDIR environment variable or via [`dnnl_set_jit_profiling_jitdumpdir`](@ref)() function. - DNNL_JIT_PROFILE_LINUX_PERFMAP -- produce Linux-specific perf-pid.map output (off by default). The output is always placed into /tmp. +# Returns +#dnnl\\_invalid\\_arguments/#dnnl::status::invalid\\_arguments if the `flags` value is invalid, and #dnnl\\_success/#dnnl::status::success on success. +# See also +dev_guide_profilers +""" +function dnnl_set_jit_profiling_flags(flags) + @ccall libdnnl.dnnl_set_jit_profiling_flags(flags::Cuint)::dnnl_status_t +end + +""" + dnnl_set_jit_profiling_jitdumpdir(dir) + +Sets JIT dump output path. Only applicable to Linux and is only used when profiling flags have [`DNNL_JIT_PROFILE_LINUX_PERF`](@ref) bit set. + +After the first JIT kernel is generated, the jitdump output will be placed into temporary directory created using the mkdtemp template 'dir/.debug/jit/dnnl.XXXXXX'. + +!!! note + + This setting overrides JITDUMPDIR environment variable. If JITDUMPDIR is not set, and this function is never called, the path defaults to HOME. Passing NULL reverts the value to default. + +!!! note + + The directory is accessed only when the first JIT kernel is being created. JIT profiling will be disabled in case of any errors accessing or creating this directory. + +# Arguments +* `dir`: JIT dump output path. +# Returns +#dnnl\\_unimplemented/#dnnl::status::unimplemented on Windows. +# See also +dev_guide_profilers +""" +function dnnl_set_jit_profiling_jitdumpdir(dir) + @ccall libdnnl.dnnl_set_jit_profiling_jitdumpdir(dir::Cstring)::dnnl_status_t +end + +""" + dnnl_set_max_cpu_isa(isa) + +Sets the maximal ISA the library can dispatch to on the CPU. See #[`dnnl_cpu_isa_t`](@ref) and #dnnl::cpu\\_isa for the list of the values accepted by the C and C++ API functions respectively. + +This function has effect only once, and returns an error on subsequent calls. 
It should also be invoked before any other oneDNN API call, otherwise it may return an error. + +This function overrides the DNNL\\_MAX\\_CPU\\_ISA environment variable. The environment variable can be set to the desired maximal ISA name in upper case and with dnnl\\_cpu\\_isa prefix removed. For example: `DNNL\\_MAX\\_CPU\\_ISA=AVX2`. + +!!! note + + The ISAs are only partially ordered: - SSE41 < AVX < AVX2 < AVX2\\_VNNI < AVX2\\_VNNI\\_2, - AVX2 < AVX512\\_CORE < AVX512\\_CORE\\_VNNI < AVX512\\_CORE\\_BF16 < AVX10\\_1\\_512 < AVX10\\_1\\_512\\_AMX < AVX10\\_1\\_512\\_AMX\\_FP16, - AVX2\\_VNNI < AVX10\\_1\\_512. Aliases: - AVX512\\_CORE\\_FP16 = AVX10\\_1\\_512 - AVX512\\_CORE\\_AMX = AVX10\\_1\\_512\\_AMX - AVX512\\_CORE\\_AMX\\_FP16 = AVX10\\_1\\_512\\_AMX\\_FP16 + +# Arguments +* `isa`: Maximal ISA the library should dispatch to. Pass #dnnl\\_cpu\\_isa\\_default/#dnnl::cpu\\_isa::isa\\_default to remove ISA restrictions (except for ISAs with initial support in the library). +# Returns +#dnnl\\_unimplemented/#dnnl::status::unimplemented if the feature was disabled at build time (see dev_guide_build_options for more details). +# See also +dev_guide_cpu_dispatcher_control for more details +""" +function dnnl_set_max_cpu_isa(isa) + @ccall libdnnl.dnnl_set_max_cpu_isa(isa::dnnl_cpu_isa_t)::dnnl_status_t +end + +""" + dnnl_get_effective_cpu_isa() + +Gets the maximal ISA the library can dispatch to on the CPU. See #[`dnnl_cpu_isa_t`](@ref) and #dnnl::cpu\\_isa for the list of the values returned by the C and C++ API functions respectively. + +# Returns +#[`dnnl_cpu_isa_t`](@ref) value reflecting the maximal ISA the library may dispatch to. +# See also +dev_guide_cpu_dispatcher_control for more details +""" +function dnnl_get_effective_cpu_isa() + @ccall libdnnl.dnnl_get_effective_cpu_isa()::dnnl_cpu_isa_t +end + +""" + dnnl_set_cpu_isa_hints(isa_hints) + +Sets the hints flag for the CPU ISA. 
See #[`dnnl_cpu_isa_hints_t`](@ref) and #dnnl::cpu\\_isa\\_hints for the list of the values accepted by the C and C++ API functions respectively. + +This function has effect only once, and returns an error on subsequent calls. It should also be invoked before any other oneDNN API call, otherwise it may return an error. + +This function overrides the DNNL\\_CPU\\_ISA\\_HINTS environment variable. + +# Arguments +* `isa_hints`: CPU ISA hints to be passed over to the implementation. Pass #dnnl\\_cpu\\_isa\\_no\\_hints/#dnnl::cpu\\_isa\\_hints::no\\_hints to use default features i.e. no hints. +# Returns +#dnnl\\_unimplemented/#dnnl::status::unimplemented if the feature was disabled at build time (see dev_guide_build_options for more details). +# See also +dev_guide_cpu_isa_hints for more details +""" +function dnnl_set_cpu_isa_hints(isa_hints) + @ccall libdnnl.dnnl_set_cpu_isa_hints(isa_hints::dnnl_cpu_isa_hints_t)::dnnl_status_t +end + +""" + dnnl_get_cpu_isa_hints() + +Gets the ISA specific hints that library can follow. See #[`dnnl_cpu_isa_hints_t`](@ref) and #dnnl::cpu\\_isa\\_hints for the list of the values returned by the C and C++ API functions respectively. + +# Returns +#[`dnnl_cpu_isa_hints_t`](@ref) value reflecting the ISA specific hints the library can follow. +# See also +dev_guide_cpu_isa_hints for more details +""" +function dnnl_get_cpu_isa_hints() + @ccall libdnnl.dnnl_get_cpu_isa_hints()::dnnl_cpu_isa_hints_t +end + +""" + dnnl_sgemm(transa, transb, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc) + +Performs single-precision matrix-matrix multiply. + +The operation is defined as: + +`C := alpha * op( A ) * op( B ) + beta * C` + +where - `op( X ) = X` or `op( X ) = X**T`, - `alpha` and `beta` are scalars, and - `A`, `B`, and `C` are matrices: - `op( A )` is an `MxK` matrix, - `op( B )` is an `KxN` matrix, - `C` is an `MxN` matrix. 
+ +The matrices are assumed to be stored in row-major order (the elements in each of the matrix rows are contiguous in memory). + +!!! note + + This API does not support XERBLA. Instead, unlike the standard BLAS functions, this one returns a [`dnnl_status_t`](@ref) value to allow error handling. + +# Arguments +* `transa`: Transposition flag for matrix A: 'N' or 'n' means A is not transposed, and 'T' or 't' means that A is transposed. +* `transb`: Transposition flag for matrix B: 'N' or 'n' means B is not transposed, and 'T' or 't' means that B is transposed. +* `M`: The M dimension. +* `N`: The N dimension. +* `K`: The K dimension. +* `alpha`: The alpha parameter that is used to scale the product of matrices A and B. +* `A`: A pointer to the A matrix data. +* `lda`: The leading dimension for the matrix A. +* `B`: A pointer to the B matrix data. +* `ldb`: The leading dimension for the matrix B. +* `beta`: The beta parameter that is used to scale the matrix C. +* `C`: A pointer to the C matrix data. +* `ldc`: The leading dimension for the matrix C. +# Returns +#dnnl\\_success/#dnnl::status::success on success and a status describing the error otherwise. +""" +function dnnl_sgemm(transa, transb, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc) + @ccall libdnnl.dnnl_sgemm(transa::Cchar, transb::Cchar, M::dnnl_dim_t, N::dnnl_dim_t, K::dnnl_dim_t, alpha::Cfloat, A::Ptr{Cfloat}, lda::dnnl_dim_t, B::Ptr{Cfloat}, ldb::dnnl_dim_t, beta::Cfloat, C::Ptr{Cfloat}, ldc::dnnl_dim_t)::dnnl_status_t +end + +""" + dnnl_gemm_u8s8s32(transa, transb, offsetc, M, N, K, alpha, A, lda, ao, B, ldb, bo, beta, C, ldc, co) + +Performs integer matrix-matrix multiply on 8-bit unsigned matrix A, 8-bit signed matrix B, and 32-bit signed resulting matrix C. 
+ +The operation is defined as: + +`C := alpha * (op(A) - A\\_offset) * (op(B) - B\\_offset) + beta * C + C\\_offset` + +where - `op( X ) = X` or `op( X ) = X**T`, - `alpha` and `beta` are scalars, and - `A`, `B`, and `C` are matrices: - `op( A )` is an `MxK` matrix, - `op( B )` is an `KxN` matrix, - `C` is an `MxN` matrix. - `A_offset` is an `MxK` matrix with every element equal the `ao` value, - `B_offset` is an `KxN` matrix with every element equal the `bo` value, - `C_offset` is an `MxN` matrix which is defined by the `co` array of size `len`: - if `offsetc = F`: the `len` must be at least `1`, - if `offsetc = C`: the `len` must be at least `max(1, m)`, - if `offsetc = R`: the `len` must be at least `max(1, n)`, + +The matrices are assumed to be stored in row-major order (the elements in each of the matrix rows are contiguous in memory). + +!!! note + + This API does not support XERBLA. Instead, unlike the standard BLAS functions, this one returns a [`dnnl_status_t`](@ref) value to allow error handling. + +!!! warning + + On some architectures saturation may happen during intermediate computations, which would lead to unexpected results. For more details, refer to dev_guide_int8_computations. + +# Arguments +* `transa`: Transposition flag for matrix A: 'N' or 'n' means A is not transposed, and 'T' or 't' means that A is transposed. +* `transb`: Transposition flag for matrix B: 'N' or 'n' means B is not transposed, and 'T' or 't' means that B is transposed. +* `offsetc`: Flag specifying how offsets should be applied to matrix C: - 'F' means that the same offset will be applied to each element of the matrix C, - 'C' means that individual offset will be applied to each element within each column, - 'R' means that individual offset will be applied to each element within each row. +* `M`: The M dimension. +* `N`: The N dimension. +* `K`: The K dimension. +* `alpha`: The alpha parameter that is used to scale the product of matrices A and B. 
+* `A`: A pointer to the A matrix data. +* `lda`: The leading dimension for the matrix A. +* `ao`: The offset value for the matrix A. +* `B`: A pointer to the B matrix data. +* `ldb`: The leading dimension for the matrix B. +* `bo`: The offset value for the matrix B. +* `beta`: The beta parameter that is used to scale the matrix C. +* `C`: A pointer to the C matrix data. +* `ldc`: The leading dimension for the matrix C. +* `co`: An array of offset values for the matrix C. The number of elements in the array depends on the value of `offsetc`. +# Returns +#dnnl\\_success/#dnnl::status::success on success and a status describing the error otherwise. +""" +function dnnl_gemm_u8s8s32(transa, transb, offsetc, M, N, K, alpha, A, lda, ao, B, ldb, bo, beta, C, ldc, co) + @ccall libdnnl.dnnl_gemm_u8s8s32(transa::Cchar, transb::Cchar, offsetc::Cchar, M::dnnl_dim_t, N::dnnl_dim_t, K::dnnl_dim_t, alpha::Cfloat, A::Ptr{UInt8}, lda::dnnl_dim_t, ao::UInt8, B::Ptr{Int8}, ldb::dnnl_dim_t, bo::Int8, beta::Cfloat, C::Ptr{Int32}, ldc::dnnl_dim_t, co::Ptr{Int32})::dnnl_status_t +end + +""" + dnnl_gemm_s8s8s32(transa, transb, offsetc, M, N, K, alpha, A, lda, ao, B, ldb, bo, beta, C, ldc, co) + +Performs integer matrix-matrix multiply on 8-bit signed matrix A, 8-bit signed matrix B, and 32-bit signed resulting matrix C. + +The operation is defined as: + +`C := alpha * (op(A) - A\\_offset) * (op(B) - B\\_offset) + beta * C + C\\_offset` + +where - `op( X ) = X` or `op( X ) = X**T`, - `alpha` and `beta` are scalars, and - `A`, `B`, and `C` are matrices: - `op( A )` is an `MxK` matrix, - `op( B )` is an `KxN` matrix, - `C` is an `MxN` matrix. 
- `A_offset` is an `MxK` matrix with every element equal the `ao` value, - `B_offset` is an `KxN` matrix with every element equal the `bo` value, - `C_offset` is an `MxN` matrix which is defined by the `co` array of size `len`: - if `offsetc = F`: the `len` must be at least `1`, - if `offsetc = C`: the `len` must be at least `max(1, m)`, - if `offsetc = R`: the `len` must be at least `max(1, n)`, + +The matrices are assumed to be stored in row-major order (the elements in each of the matrix rows are contiguous in memory). + +!!! note + + This API does not support XERBLA. Instead, unlike the standard BLAS functions, this one returns a [`dnnl_status_t`](@ref) value to allow error handling. + +!!! warning + + On some architectures saturation may happen during intermediate computations, which would lead to unexpected results. For more details, refer to dev_guide_int8_computations. + +# Arguments +* `transa`: Transposition flag for matrix A: 'N' or 'n' means A is not transposed, and 'T' or 't' means that A is transposed. +* `transb`: Transposition flag for matrix B: 'N' or 'n' means B is not transposed, and 'T' or 't' means that B is transposed. +* `offsetc`: Flag specifying how offsets should be applied to matrix C: - 'F' means that the same offset will be applied to each element of the matrix C, - 'C' means that individual offset will be applied to each element within each column, - 'R' means that individual offset will be applied to each element within each row. +* `M`: The M dimension. +* `N`: The N dimension. +* `K`: The K dimension. +* `alpha`: The alpha parameter that is used to scale the product of matrices A and B. +* `A`: A pointer to the A matrix data. +* `lda`: The leading dimension for the matrix A. +* `ao`: The offset value for the matrix A. +* `B`: A pointer to the B matrix data. +* `ldb`: The leading dimension for the matrix B. +* `bo`: The offset value for the matrix B. +* `beta`: The beta parameter that is used to scale the matrix C. 
+* `C`: A pointer to the C matrix data. +* `ldc`: The leading dimension for the matrix C. +* `co`: An array of offset values for the matrix C. The number of elements in the array depends on the value of `offsetc`. +# Returns +#dnnl\\_success/#dnnl::status::success on success and a status describing the error otherwise. +""" +function dnnl_gemm_s8s8s32(transa, transb, offsetc, M, N, K, alpha, A, lda, ao, B, ldb, bo, beta, C, ldc, co) + @ccall libdnnl.dnnl_gemm_s8s8s32(transa::Cchar, transb::Cchar, offsetc::Cchar, M::dnnl_dim_t, N::dnnl_dim_t, K::dnnl_dim_t, alpha::Cfloat, A::Ptr{Int8}, lda::dnnl_dim_t, ao::Int8, B::Ptr{Int8}, ldb::dnnl_dim_t, bo::Int8, beta::Cfloat, C::Ptr{Int32}, ldc::dnnl_dim_t, co::Ptr{Int32})::dnnl_status_t +end + +# Skipping MacroDefinition: DNNL_HELPER_DLL_IMPORT __attribute__ ( ( visibility ( "default" ) ) ) + +# Skipping MacroDefinition: DNNL_HELPER_DLL_EXPORT __attribute__ ( ( visibility ( "default" ) ) ) + +# Skipping MacroDefinition: DNNL_DEPRECATED __attribute__ ( ( deprecated ) ) + +const DNNL_RUNTIME_NONE = Cuint(0) + +const DNNL_RUNTIME_SEQ = Cuint(1) + +const DNNL_RUNTIME_OMP = Cuint(2) + +const DNNL_RUNTIME_TBB = Cuint(4) + +const DNNL_RUNTIME_THREADPOOL = Cuint(8) + +const DNNL_RUNTIME_OCL = Cuint(256) + +const DNNL_RUNTIME_SYCL = Cuint(512) + +const DNNL_RUNTIME_DPCPP = DNNL_RUNTIME_SYCL + +const DNNL_CPU_THREADING_RUNTIME = DNNL_RUNTIME_OMP + +const DNNL_CPU_RUNTIME = DNNL_RUNTIME_OMP + +const DNNL_GPU_RUNTIME = DNNL_RUNTIME_NONE + +const BUILD_TRAINING = 1 + +const BUILD_INFERENCE = 0 + +const BUILD_PRIMITIVE_ALL = 1 + +const BUILD_BATCH_NORMALIZATION = 0 + +const BUILD_BINARY = 0 + +const BUILD_CONCAT = 0 + +const BUILD_CONVOLUTION = 0 + +const BUILD_DECONVOLUTION = 0 + +const BUILD_ELTWISE = 0 + +const BUILD_GROUP_NORMALIZATION = 0 + +const BUILD_INNER_PRODUCT = 0 + +const BUILD_LAYER_NORMALIZATION = 0 + +const BUILD_LRN = 0 + +const BUILD_MATMUL = 0 + +const BUILD_POOLING = 0 + +const BUILD_PRELU = 0 + +const BUILD_REDUCTION 
= 0 + +const BUILD_REORDER = 0 + +const BUILD_RESAMPLING = 0 + +const BUILD_RNN = 0 + +const BUILD_SHUFFLE = 0 + +const BUILD_SOFTMAX = 0 + +const BUILD_SUM = 0 + +const BUILD_PRIMITIVE_CPU_ISA_ALL = 1 + +const BUILD_SSE41 = 0 + +const BUILD_AVX2 = 0 + +const BUILD_AVX512 = 0 + +const BUILD_AMX = 0 + +const BUILD_PRIMITIVE_GPU_ISA_ALL = 1 + +const BUILD_GEN9 = 0 + +const BUILD_GEN11 = 0 + +const BUILD_XELP = 0 + +const BUILD_XEHP = 0 + +const BUILD_XEHPG = 0 + +const BUILD_XEHPC = 0 + +const BUILD_XE2 = 0 + +const BUILD_GEMM_KERNELS_ALL = 1 + +const BUILD_GEMM_KERNELS_NONE = 0 + +const BUILD_GEMM_SSE41 = 0 + +const BUILD_GEMM_AVX2 = 0 + +const BUILD_GEMM_AVX512 = 0 + +const DNNL_MAX_NDIMS = 12 + +const DNNL_MEMORY_NONE = NULL + +# Skipping MacroDefinition: DNNL_MEMORY_ALLOCATE ( ( void * ) ( size_t ) - 1 ) + +const DNNL_VERSION_MAJOR = 3 + +const DNNL_VERSION_MINOR = 5 + +const DNNL_VERSION_PATCH = 3 + +const DNNL_VERSION_HASH = "66f0cb9eb66affd2da3bf5f8d897376f04aae6af" + +const DNNL_RUNTIME_DIM_VAL = -9223372036854775808 + +const DNNL_RUNTIME_SIZE_VAL = 0x8000000000000000 + +# Skipping MacroDefinition: DNNL_RUNTIME_F32_VAL ( DNNL_RUNTIME_F32_VAL_REP . 
f ) + +const DNNL_RUNTIME_S32_VAL = 0 + +const DNNL_ARG_UNDEF = 0 + +const DNNL_ARG_SRC_0 = 1 + +const DNNL_ARG_SRC = DNNL_ARG_SRC_0 + +const DNNL_ARG_SRC_LAYER = DNNL_ARG_SRC_0 + +const DNNL_ARG_FROM = DNNL_ARG_SRC_0 + +const DNNL_ARG_SRC_1 = 2 + +const DNNL_ARG_SRC_ITER = DNNL_ARG_SRC_1 + +const DNNL_ARG_SRC_2 = 3 + +const DNNL_ARG_SRC_ITER_C = DNNL_ARG_SRC_2 + +const DNNL_ARG_SRC_3 = 4 + +const DNNL_ARG_AUGRU_ATTENTION = DNNL_ARG_SRC_3 + +const DNNL_ARG_DST_0 = 17 + +const DNNL_ARG_DST = DNNL_ARG_DST_0 + +const DNNL_ARG_TO = DNNL_ARG_DST_0 + +const DNNL_ARG_DST_LAYER = DNNL_ARG_DST_0 + +const DNNL_ARG_DST_1 = 18 + +const DNNL_ARG_DST_ITER = DNNL_ARG_DST_1 + +const DNNL_ARG_DST_2 = 19 + +const DNNL_ARG_DST_ITER_C = DNNL_ARG_DST_2 + +const DNNL_ARG_WEIGHTS_0 = 33 + +const DNNL_ARG_WEIGHTS = DNNL_ARG_WEIGHTS_0 + +const DNNL_ARG_WEIGHTS_LAYER = DNNL_ARG_WEIGHTS_0 + +const DNNL_ARG_WEIGHTS_1 = 34 + +const DNNL_ARG_WEIGHTS_ITER = DNNL_ARG_WEIGHTS_1 + +const DNNL_ARG_WEIGHTS_2 = 35 + +const DNNL_ARG_WEIGHTS_PEEPHOLE = DNNL_ARG_WEIGHTS_2 + +const DNNL_ARG_WEIGHTS_3 = 36 + +const DNNL_ARG_WEIGHTS_PROJECTION = DNNL_ARG_WEIGHTS_3 + +const DNNL_ARG_BIAS = 41 + +const DNNL_ARG_MEAN = 49 + +const DNNL_ARG_VARIANCE = 50 + +const DNNL_ARG_SCALE = 51 + +const DNNL_ARG_SHIFT = 52 + +const DNNL_ARG_WORKSPACE = 64 + +const DNNL_ARG_SCRATCHPAD = 80 + +const DNNL_ARG_DIFF_SRC_0 = 129 + +const DNNL_ARG_DIFF_SRC = DNNL_ARG_DIFF_SRC_0 + +const DNNL_ARG_DIFF_SRC_LAYER = DNNL_ARG_DIFF_SRC_0 + +const DNNL_ARG_DIFF_SRC_1 = 130 + +const DNNL_ARG_DIFF_SRC_ITER = DNNL_ARG_DIFF_SRC_1 + +const DNNL_ARG_DIFF_SRC_2 = 131 + +const DNNL_ARG_DIFF_SRC_ITER_C = DNNL_ARG_DIFF_SRC_2 + +const DNNL_ARG_DIFF_SRC_3 = 132 + +const DNNL_ARG_DIFF_AUGRU_ATTENTION = DNNL_ARG_DIFF_SRC_3 + +const DNNL_ARG_DIFF_DST_0 = 145 + +const DNNL_ARG_DIFF_DST = DNNL_ARG_DIFF_DST_0 + +const DNNL_ARG_DIFF_DST_LAYER = DNNL_ARG_DIFF_DST_0 + +const DNNL_ARG_DIFF_DST_1 = 146 + +const DNNL_ARG_DIFF_DST_ITER = DNNL_ARG_DIFF_DST_1 + 
+const DNNL_ARG_DIFF_DST_2 = 147 + +const DNNL_ARG_DIFF_DST_ITER_C = DNNL_ARG_DIFF_DST_2 + +const DNNL_ARG_DIFF_WEIGHTS_0 = 161 + +const DNNL_ARG_DIFF_WEIGHTS = DNNL_ARG_DIFF_WEIGHTS_0 + +const DNNL_ARG_DIFF_WEIGHTS_LAYER = DNNL_ARG_DIFF_WEIGHTS_0 + +const DNNL_ARG_DIFF_WEIGHTS_1 = 162 + +const DNNL_ARG_DIFF_WEIGHTS_ITER = DNNL_ARG_DIFF_WEIGHTS_1 + +const DNNL_ARG_DIFF_WEIGHTS_2 = 163 + +const DNNL_ARG_DIFF_WEIGHTS_PEEPHOLE = DNNL_ARG_DIFF_WEIGHTS_2 + +const DNNL_ARG_DIFF_WEIGHTS_3 = 164 + +const DNNL_ARG_DIFF_WEIGHTS_PROJECTION = DNNL_ARG_DIFF_WEIGHTS_3 + +const DNNL_ARG_DIFF_BIAS = 169 + +const DNNL_ARG_DIFF_SCALE = 255 + +const DNNL_ARG_DIFF_SHIFT = 256 + +const DNNL_ARG_ATTR_OUTPUT_SCALES = 513 + +const DNNL_ARG_MULTIPLE_SRC = 1024 + +const DNNL_ARG_MULTIPLE_DST = 2048 + +const DNNL_ARG_ATTR_SCALES = 4096 + +const DNNL_ARG_ATTR_ZERO_POINTS = 8192 + +const DNNL_ARG_ATTR_POST_OP_DW = 16384 + +const DNNL_ARG_ATTR_MULTIPLE_POST_OP_BASE = 32768 + +const DNNL_JIT_PROFILE_NONE = Cuint(0) + +const DNNL_JIT_PROFILE_VTUNE = Cuint(1) + +const DNNL_JIT_PROFILE_LINUX_PERFMAP = Cuint(2) + +const DNNL_JIT_PROFILE_LINUX_JITDUMP = Cuint(4) + +const DNNL_JIT_PROFILE_LINUX_JITDUMP_USE_TSC = Cuint(8) + +const DNNL_JIT_PROFILE_LINUX_PERF = DNNL_JIT_PROFILE_LINUX_JITDUMP | DNNL_JIT_PROFILE_LINUX_PERFMAP + +#! 
format: on + +# exports +const PREFIXES = ["CX", "clang_"] +for name in names(@__MODULE__; all=true), prefix in PREFIXES + if startswith(string(name), prefix) + @eval export $name + end +end + +end # module diff --git a/src/onednn/memory.jl b/src/onednn/memory.jl new file mode 100644 index 00000000..69c4fae7 --- /dev/null +++ b/src/onednn/memory.jl @@ -0,0 +1,344 @@ +# Memory Descriptor +struct MemoryDesc + handle::Lib.dnnl_memory_desc_t + + MemoryDesc(x::Lib.dnnl_memory_desc_t) = new(x) + MemoryDesc(x) = memory_descriptor(x) +end + +Base.unsafe_convert(::Type{Lib.dnnl_memory_desc_t}, x::MemoryDesc) = x.handle +function Base.unsafe_convert(::Type{Ptr{Lib.dnnl_memory_desc_t}}, x::MemoryDesc) + return Base.unsafe_convert(Ptr{Lib.dnnl_memory_desc_t}, Base.pointer_from_objref(x)) +end + +memory_descriptor(x::MemoryDesc) = x +function Base.cconvert(::Type{Ptr{Lib.dnnl_memory_desc_t}}, x::MemoryDesc) + return Base.cconvert(Ptr{Lib.dnnl_memory_desc_t}, Ref(x.handle)) +end + +function Base.eltype(md::MemoryDesc) + result = Ref{Lib.dnnl_data_type_t}() + @dnnlcall Lib.dnnl_memory_desc_query(md, Lib.dnnl_query_data_type, result) + return dnnl_type_to_julia(unwrap_ref(result)) +end + +function Base.size(md::MemoryDesc) + result = Ref(Vector{Int64}(undef, Lib.DNNL_MAX_NDIMS)) + @dnnlcall Lib.dnnl_memory_desc_query(md, Lib.dnnl_query_dims, result) + return Tuple(reverse(result[][1:ndims(md)])) +end + +function Base.strides(md::MemoryDesc) + result = Ref(Vector{Int64}(undef, Lib.DNNL_MAX_NDIMS)) + @dnnlcall Lib.dnnl_memory_desc_query(md, Lib.dnnl_query_strides, result) + return Tuple(reverse(result[][1:ndims(md)])) +end + +function Base.ndims(md::MemoryDesc) + result = Ref{Lib.dnnl_dim_t}() + @dnnlcall Lib.dnnl_memory_desc_query(md, Lib.dnnl_query_ndims_s32, result) + return Int(unwrap_ref(result)) +end + +function padded_size(md::MemoryDesc) + result = Ref(Vector{Int64}(undef, Lib.DNNL_MAX_NDIMS)) + @dnnlcall Lib.dnnl_memory_desc_query(md, Lib.dnnl_query_padded_dims, result) + 
padded_dims = result[] + return Tuple(reverse(padded_dims[1:findlast(!=(0), padded_dims)])) +end + +function padded_offsets(md::MemoryDesc) + result = Ref(Vector{Int64}(undef, Lib.DNNL_MAX_NDIMS)) + @dnnlcall Lib.dnnl_memory_desc_query(md, Lib.dnnl_query_padded_offsets, result) + return Tuple(reverse(result[][1:ndims(md)])) +end + +function format_kind(md::MemoryDesc) + result = Ref{Lib.dnnl_format_kind_t}() + @dnnlcall Lib.dnnl_memory_desc_query(md, Lib.dnnl_query_format_kind, result) + return unwrap_ref(result) +end + +function print_memory_descriptor(io::IO, md::MemoryDesc, level::Int=0) + base_desc = "oneDNN Memory Description:" + # TODO: Additional information if the format is "blocked" + join_str = "\n" * " "^(level + 1) + ndims_str = "ndims: $(ndims(md))" + size_str = "size: $(size(md))" + datatype_str = "datatype: $(eltype(md))" + format_kind_str = "format kind: $(format_kind(md))" + padded_dims_str = "padded dims: $(padded_size(md))" + padded_offsets_str = "padded offsets: $(padded_offsets(md))" + desc = join( + [base_desc, ndims_str, size_str, datatype_str, + format_kind_str, padded_dims_str, padded_offsets_str], + join_str) + print(io, desc) +end + +function Base.show(io::IO, ::MIME"text/plain", md::MemoryDesc) + print_memory_descriptor(io, md) +end + +memory_descriptor(x::AbstractArray{T}) where {T} = memory_descriptor(T, size(x), strides(x)) + +function memory_descriptor( + ::Type{T}, dims::Dims{N}, strides::Dims{N}=default_strides(dims)) where {T, N} + handle = Ref{Lib.dnnl_memory_desc_t}() + @dnnlcall dnnl_memory_desc_create_with_strides( + handle, N, reverse(dims), T, reverse(strides)) + return MemoryDesc(unwrap_ref(handle)) +end + +# convenience creation by tag. 
+function memory_descriptor( + ::Type{T}, dims::Dims{N}, tag::Union{Lib.dnnl_format_tag_t, UInt32}) where {T, N} + handle = Ref{Lib.dnnl_memory_desc_t}() + @dnnlcall dnnl_memory_desc_create_with_tag(handle, N, reverse(dims), T, tag) + return MemoryDesc(unwrap_ref(handle)) +end + +# toany(a::MemoryDesc) = memorydesc(a.data_type, logicalsize(a), dnnl_format_any()) + +# isany(a::Ptr{MemoryDesc}) = isany(unsafe_load(a)) +# isany(a::MemoryDesc) = a.format_kind == Lib.dnnl_format_kind_any + +# function Base.:(==)(a::MaybeRef{MemoryDesc}, b::MaybeRef{MemoryDesc}) +# return Bool(Lib.dnnl_memory_desc_equal(wrap_ref(a), wrap_ref(b))) +# end + +function get_bytes(a::MaybeRef{MemoryDesc}) + return signed(Lib.dnnl_memory_desc_get_size(unwrap_ref(a).handle)) +end + +# Memory Type for oneDNN -- distinct from Memory in Base +struct Memory{T, N, A <: AbstractArray{T}} <: AbstractArray{T, N} + # The underlying array that is supplying the data. + array::A + offset::Int + + # Keep around some information about size and padding. 
+ logicalsize::Dims{N} + + # Memory object from DNNL + memory::MemoryPtr +end + +ArrayInterface.fast_scalar_indexing(::Type{<:Memory}) = false +ArrayInterface.can_setindex(::Type{<:Memory}) = false + +function Base.convert(::Type{Memory{T, N, A}}, x::Memory{T, N, B}) where {T, N, A, B} + return Memory(convert(A, x.array), x.offset, x.logicalsize, x.memory) +end + +memory_descriptor(x::Memory) = MemoryDesc(memory_descriptor_ptr(x)) + +Base.sizeof(x::Memory) = get_bytes(memory_descriptor(x)) + +# toany(x::Memory) = toany(memorydesc(x)) + +Base.size(x::Memory) = x.logicalsize +# logicalsize(x::Memory) = size(x) +Base.strides(x::Memory) = strides(memory_descriptor(x)) +# padded_size(x::Memory{T,N}) where {T,N} = padded_size(memorydesc(x), Val(N)) + +Base.parent(x::Memory) = x.array +# function ChainRulesCore.rrule(::typeof(Base.parent), x::Memory) +# return parent(x), Δ -> (ChainRulesCore.NoTangent(), Δ) +# end + +# arraytype(::Memory{T,N,A}) where {T,N,A} = A + +function Base.show(io::IO, x::Memory) + print(io, "Opaque Memory with ") + print_memory_descriptor(io, memory_descriptor(x)) + x.offset != 1 && print(io, " - SubArray") + return +end +Base.show(io::IO, ::MIME"text/plain", x::Memory) = show(io, x) + +# Base.any(f::F, x::Memory) where {F <: Function} = any(f, materialize(x)) + +# for creating OneDNN arguments +# @inline access_pointer(x, offset, context) = pointer(x, offset) +# function setptr!(x::Memory{T}, context::AccessContext = Reading()) where {T} +# ptr = access_pointer(x.array, x.offset, context) +# @apicall dnnl_memory_set_data_handle_v2(x.memory, ptr, global_stream()) +# end + +# function Base.cconvert( +# ::Type{T}, x::Memory +# ) where {T<:Union{Lib.dnnl_memory_t,Ptr{Lib.dnnl_memory_t}}} +# setptr!(x) +# return Base.cconvert(T, x.memory) +# end + +# Base.cconvert(::Type{Ptr{Lib.dnnl_memory_desc_t}}, x::Memory) = memorydesc_ptr(x) + +# Base.elsize(::Type{<:Memory{T}}) where {T} = sizeof(T) +# function Base.unsafe_convert(::Type{Ptr{T}}, x::Memory{T}) 
where {T} +# return pointer(x.array) +# end + +# # For constructing DNNL arguments. +# function dnnl_exec_arg(x::Memory, context::AccessContext = Reading()) +# setptr!(x, context) +# return x.memory +# end + +# Try to remove as many layers of wrapping around `A` as possible. +# Since all of the dimension and layout information will be stored in the OneDNN +# `memorydesc`, we don't need to hold onto it on the Julia level, which can potentially +# cause down-stream type instabilities. +Memory(A::AbstractArray) = Memory(ancestor(A), offset(A), size(A), MemoryPtr(A)) + +offset(::AbstractArray) = one(Int64) +offset(x::SubArray) = Base.first_index(x) + +Memory(M::Memory) = M + +# function ChainRulesCore.rrule(::Type{<:Memory}, x) +# return (Memory(x), Δ -> (ChainRulesCore.NoTangent(), Δ)) +# end + +# # Convenience method for creating destination memories from a source memory. +# Base.size(M::Memory) = M.logicalsize +# Base.eltype(M::Memory{T}) where {T} = T + +function Base.getindex(::Memory, I::Vararg{Int, N}) where {N} + throw(ArgumentError("Cannot index opaque memory formats.")) +end + +function Base.setindex!(::Memory, v, I::Vararg{Int, N}) where {N} + throw(ArgumentError("Cannot index opaque memory formats.")) +end + +memory_descriptor(M::Memory) = MemoryDesc(memory_descriptor_ptr(M)) +function memory_descriptor_ptr(M::Memory) + md = Ref{Lib.dnnl_memory_desc_t}() + @dnnlcall Lib.dnnl_memory_get_memory_desc(M.memory, md) + return unwrap_ref(md) +end + +# ##### +# ##### Lazy Transpose +# ##### + +# # General idea: swap the dims and strides. +# # TODO: Need to validate that this is a blocked layout with no tiling ... 
+# function Base.adjoint(M::Memory{T,2}) where {T} +# dims = size(M) +# strides = Base.strides(memorydesc(M), Val(2)) + +# reversed_dims = reverse(dims) +# desc = memorydesc(T, reversed_dims, reverse(strides)) +# memory = MemoryPtr(parent(M), desc) +# return Memory(parent(M), M.offset, reversed_dims, memory) +# end + +# function Base.permutedims(M::Memory{T,N}, perm::NTuple{N,Int}) where {T,N} +# dims = size(M) +# strides = Base.strides(memorydesc(M), Val(N)) +# dims_permuted = unsafe_permute(dims, perm) +# strides_permuted = unsafe_permute(strides, perm) + +# desc = memorydesc(T, dims_permuted, strides_permuted) +# memory = MemoryPtr(parent(M), desc) +# return Memory(parent(M), M.offset, dims_permuted, memory) +# end + +# function unsafe_permute(a::NTuple{N,Int}, b::NTuple{N,Int}) where {N} +# return ntuple(i -> @inbounds(a[@inbounds b[i]]), Val(N)) +# end + +# ##### +# ##### Construct more memories!! +# ##### + +# function Base.similar( +# x::Memory{U,M}, +# ::Type{T} = eltype(x), +# dims::NTuple{N,Int} = size(x), +# desc::MemoryDesc = (M == N && U === T) ? memorydesc(x) : memorydesc(T, dims), +# ) where {U,T,M,N} +# # Number of bytes to allocate. +# # Since OneDNN is free to reorder and pad, we need to explicitly ask it. +# bytes = getbytes(desc) + +# # Allocate the output array. +# # This will be allocated as just a plain vector with dimensions padded with ones so it +# # has the same dimension as the wrapped "Memory" +# padded_dims = (div(bytes, sizeof(T)), ntuple(_ -> 1, Val(N - 1))...) +# out = similar(ancestor(x), T, padded_dims) + +# # Since we specifically created this array, the offset will always start at one. +# return Memory(out, 1, dims, MemoryPtr(out, desc)) +# end + +# Base.similar(x::Memory{T,M}, dims::NTuple{N,Int}) where {T,M,N} = similar(x, T, dims) +# function Base.similar(x::Memory{T,M}, dims::NTuple{N,Int}, desc::MemoryDesc) where {T,M,N} +# return similar(x, T, dims, desc) +# end + +# materialize(x::AbstractArray, args...; kw...) 
= x +# function Array(M::Memory{T, N}) where {T, N} +# # Check if this memory is already in the requested layout. +# # If so, return the underlying array. +# desired_strides = default_strides(size(M)) +# actual_strides = strides(M) + +# # In order to return the underlying object, we need to ensure that: +# # 1. The length of the wrapped object is the same as the length of the Memory. +# # This helps handle views correctly. +# # +# # 2. Strides are the same +# if length(parent(M)) == length(M) && desired_strides == actual_strides +# return reshape(parent(M), size(M)) +# end + +# desc = memory_descriptor(T, size(M), desired_strides) +# end +# function materialize(M::Memory{T,N}; allowreorder = true) where {T,N} +# # Check if this memory is already in the requested layout. +# # If so, return the underlying array. +# desired_strides = default_strides(logicalsize(M)) +# actual_strides = strides(M) + +# # In order to return the underlying object, we need to ensure that: +# # 1. The length of the wrapped object is the same as the length of the Memory. +# # This helps handle views correctly. +# # +# # 2. Strides are the same[ +# if length(parent(M)) == length(M) && desired_strides == actual_strides +# return reshape(parent(M), logicalsize(M)) +# end + +# if !allowreorder +# msg = """ +# Expected strides: $desired_strides. +# Found strides: $actual_strides. +# """ +# throw(ArgumentError(msg)) +# end + +# desc = memorydesc(T, logicalsize(M), desired_strides) +# return reshape(parent(reorder(desc, M)), logicalsize(M)) +# end + +# function ChainRulesCore.rrule( +# ::typeof(materialize), x, args::Vararg{Any,N}; kw... 
+# ) where {N} +# return materialize(x, args...; kw...), +# Δ -> (ChainRulesCore.NoTangent(), Δ, ntuple(_ -> ChainRulesCore.NoTangent(), Val(N))) +# end + +# ##### +# ##### Reshape +# ##### + +# function Base.reshape(memory::Memory{T}, dims::NTuple{N,Int}) where {T,N} +# md = Ref{MemoryDesc}() +# @apicall dnnl_memory_desc_reshape(md, memory, N, Ref(reverse(dims))) +# new_memory = MemoryPtr(parent(memory), md) +# return Memory(parent(memory), memory.offset, dims, new_memory) +# end diff --git a/src/onednn/oneDNN.jl b/src/onednn/oneDNN.jl new file mode 100644 index 00000000..3792d268 --- /dev/null +++ b/src/onednn/oneDNN.jl @@ -0,0 +1,27 @@ +module oneDNN + +using ArrayInterface: ArrayInterface +using ..Utils: ancestor + +include("lib.jl") # Low-level bindings to oneDNN C API -- automatically generated + +include("utils.jl") + +include("types.jl") +include("memory.jl") + +include("api.jl") + +const GLOBAL_ENGINE_INITIALIZED = Ref{Bool}(false) +const GLOBAL_ENGINE = Ref{Engine}() + +function __init__() + # Initialize the global engine. + GLOBAL_ENGINE[] = engine() + GLOBAL_ENGINE_INITIALIZED[] = true + + # Set the default math mode. We set to the fastest mode. 
+ set_math_mode!(:fastest) +end + +end diff --git a/src/onednn/types.jl b/src/onednn/types.jl new file mode 100644 index 00000000..84916205 --- /dev/null +++ b/src/onednn/types.jl @@ -0,0 +1,63 @@ +@wrap_type MemoryPtr dnnl_memory_t dnnl_memory_destroy + +function MemoryPtrNoFinalizer(A::AbstractArray, desc=memory_descriptor(A)) + return MemoryPtrNoFinalizer(convert(Ptr{Nothing}, pointer(A)), desc) +end + +function MemoryPtrNoFinalizer(ptr::Ptr{Nothing}, desc) + memory = MemoryPtr(InnerConstructor()) + @dnnlcall dnnl_memory_create(memory, desc, global_engine(), ptr) + return memory +end + +@wrap_type Engine dnnl_engine_t dnnl_engine_destroy + +function EngineNoFinalizer(kind=Lib.dnnl_cpu, index=0) + engine = Engine(InnerConstructor()) + @dnnlcall dnnl_engine_create(engine, kind, index) + return engine +end + +@wrap_type Stream dnnl_stream_t dnnl_stream_destroy + +function StreamNoFinalizer(engine::Engine) + stream = Stream(InnerConstructor()) + @dnnlcall dnnl_stream_create(stream, engine, Lib.dnnl_stream_default_flags) + return stream +end + +@wrap_type Attributes dnnl_primitive_attr_t dnnl_primitive_attr_destroy + +function AttributesNoFinalizer() + attributes = Attributes(InnerConstructor()) + @dnnlcall dnnl_primitive_attr_create(attributes) + @dnnlcall dnnl_primitive_attr_set_scratchpad_mode( + attributes, Lib.dnnl_scratchpad_mode_user) + return attributes +end + +@wrap_type PostOps dnnl_post_ops_t dnnl_post_ops_destroy + +function PostOpsNoFinalizer() + postops = PostOps(InnerConstructor()) + @dnnlcall dnnl_post_ops_create(postops) + return postops +end + +@wrap_type PrimitiveDescriptor dnnl_primitive_desc_t dnnl_primitive_desc_destroy + +function PrimitiveDescriptorNoFinalizer(args...) + return PrimitiveDescriptorNoFinalizer(Lib.dnnl_primitive_desc_create, args...) +end + +function PrimitiveDescriptorNoFinalizer(f::F, args...) where {F <: Function} + descriptor = PrimitiveDescriptor(InnerConstructor()) + @dnnlcall f(descriptor, args...) 
+ return descriptor +end + +function Base.copy(x::PrimitiveDescriptor) + descriptor = PrimitiveDescriptor(InnerConstructor()) + @dnnlcall dnnl_primitive_desc_clone(descriptor, x) + return descriptor +end diff --git a/src/onednn/utils.jl b/src/onednn/utils.jl new file mode 100644 index 00000000..7aba8287 --- /dev/null +++ b/src/onednn/utils.jl @@ -0,0 +1,129 @@ +macro dnnlcall(ex) + expr = dnnlcall_partial_impl(ex) + return quote + status = $(expr) + if status != Lib.dnnl_success + throw(ErrorException("oneDNN call failed with status $(status).")) + end + status + end +end + +function dnnlcall_partial_impl(expr) + expr.head != :call && error("Only call `@dnnlcall` on function calls") + + # Prefix "Lib." in front of the function call. + # However, sometimes the function to call is passed as a higher order function. + # Thus, we only implicitly attach "Lib" is the function name starts with "dnnl". + fname = expr.args[1] + if isa(fname, Symbol) + fname = startswith(string(fname), "dnnl") ? :(Lib.$(fname)) : :($(esc(fname))) + end + + # Escape and convert each of the arguments. + args = expr.args[2:end] + for i in eachindex(args) + # Handle splats. + arg = args[i] + if isa(arg, Expr) && arg.head == :... + args[i] = :(dnnl_convert($(esc(arg.args[1]))...)...) + else + args[i] = :(dnnl_convert($(esc(args[i])))) + end + end + return :($fname($(args...))) +end + +struct InnerConstructor end + +macro wrap_type(jl_name, c_name, destructor) + lower_constructor_name = Symbol(jl_name, :NoFinalizer) + + # Automatically add the "Lib" prefix if required. + c_name isa Symbol && (c_name = :(Lib.$(c_name))) + + return esc(quote + # Type definition + mutable struct $(jl_name) + handle::$(c_name) + $(jl_name)(::InnerConstructor) = new($(c_name)()) + end + + # Use a trick of Lower and Higher constructeors. + # Lower constructors should have the name `$(jl_name)NoFinalizer` and not + # attach finalizers. 
+ # + # The higher constructor will simply forward to the lower constructor but + # attach a finalizer before returning. + function $(jl_name)(args...) + x = $(lower_constructor_name)(args...) + attach_finalizer!(x) + return x + end + + # Finalizer + destroy(x::$(jl_name)) = @dnnlcall $(destructor)(x) + attach_finalizer!(x::$(jl_name)) = finalizer(destroy, x) + + # Conversion functions + Base.unsafe_convert(::Type{$(c_name)}, x::$(jl_name)) = x.handle + function Base.unsafe_convert(::Type{Ptr{$(c_name)}}, x::$(jl_name)) + return Base.unsafe_convert(Ptr{$(c_name)}, Base.pointer_from_objref(x)) + end + end) +end + +const MaybeRef{T} = Union{Ref{T}, T} +const MaybePtr{T} = Union{Ptr{T}, T} + +wrap_ref(x::Ref) = x +wrap_ref(x) = Ref(x) + +unwrap_ref(x::Ref) = x[] +unwrap_ref(x) = x + +dnnl_type(::Type{Float16}) = Lib.dnnl_f16 +dnnl_type(::Type{Float32}) = Lib.dnnl_f32 +dnnl_type(::Type{Float64}) = Lib.dnnl_f64 +dnnl_type(::Type{Int32}) = Lib.dnnl_s32 +dnnl_type(::Type{Int8}) = Lib.dnnl_s8 +dnnl_type(::Type{UInt8}) = Lib.dnnl_u8 +dnnl_type(::Type{Bool}) = Lib.dnnl_boolean +dnnl_type(::T) where {T <: Number} = dnnl_type(T) +dnnl_type(::Type{T}) where {T} = error("No DNNL type for type $T") +dnnl_type(::T) where {T} = error("No DNNL type for $T") + +function dnnl_type_to_julia(x::Lib.dnnl_data_type_t) + x == Lib.dnnl_f16 && return Float16 + x == Lib.dnnl_f32 && return Float32 + x == Lib.dnnl_f64 && return Float64 + x == Lib.dnnl_s32 && return Int32 + x == Lib.dnnl_s8 && return Int8 + x == Lib.dnnl_u8 && return UInt8 + x == Lib.dnnl_boolean && return Bool + error("No Julia type for DNNL type $x") +end + +dnnl_convert(x) = x +dnnl_convert(x, y...) = (dnnl_convert(x), dnnl_convert.(y...)...) +dnnl_convert(::Type{T}) where {T} = dnnl_type(T) +dnnl_convert(x::Dims{N}) where {N} = Ref(dnnl_dims(x)) +# dnnl_convert(x::NTuple{N, oneDNNMemoryDesc}) where {N} = Ref(x) + +# Make a DIMS array +# NOTE: The OneDNN C-API expects a pointer, so we can't just pass a tuple. 
+# We either need to pass an array, or a Ref{Tuple}. +# Hwere, we choose to do the latter. +function dnnl_dims(x::Dims{N}) where {N} + f(i) = i ≤ length(x) ? Lib.dnnl_dim_t(x[i]) : zero(Lib.dnnl_dim_t) + return ntuple(f, Val(Lib.DNNL_MAX_NDIMS)) +end +dnnl_dims(x::Dims{Lib.DNNL_MAX_NDIMS}) = x + +dnnl_dims(x::AbstractArray) = dnnl_dims(strides(x)) +dnnl_dims() = ntuple(Returns(zero(Int64)), Val(Lib.DNNL_MAX_NDIMS)) +dnnl_dims(::Tuple{}) = dnnl_dims() + +# Formats +default_strides(size::Tuple{Vararg{Int, N}}) where {N} = Base.size_to_strides(1, size...) +# dnnl_format_any() = Lib.dnnl_format_tag_any diff --git a/src/utils.jl b/src/utils.jl index 0a94d8c5..0aaf0f98 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -319,4 +319,10 @@ end CRC.@non_differentiable static_training_mode_check(::Any...) +function ancestor(x::AbstractArray) + p = parent(x) + p === x && return x + return ancestor(p) +end + end