diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/SqueezeNetObjectDetectionCPPNonWinRT.vcxproj b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/SqueezeNetObjectDetectionCPPNonWinRT.vcxproj
new file mode 100644
index 00000000..b848da9a
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/SqueezeNetObjectDetectionCPPNonWinRT.vcxproj
@@ -0,0 +1,175 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <VCProjectVersion>15.0</VCProjectVersion>
+    <ProjectGuid>{73B73100-4B52-4073-A4AA-289158526A19}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <RootNamespace>SqueezeNetObjectDetectionCPPNonWinRT</RootNamespace>
+    <WindowsTargetPlatformVersion>10.0.19041.0</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v141</PlatformToolset>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v141</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v141</PlatformToolset>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v141</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Label="Shared">
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <SDLCheck>true</SDLCheck>
+      <PreprocessorDefinitions>_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <ConformanceMode>true</ConformanceMode>
+      <LanguageStandard>stdcpp14</LanguageStandard>
+      <AdditionalIncludeDirectories>..\..\..\..\SampleSharedLib\SampleSharedLib;$(MSBuildThisFileDirectory)../../build/native/include/;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>runtimeobject.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <SDLCheck>true</SDLCheck>
+      <PreprocessorDefinitions>_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <ConformanceMode>true</ConformanceMode>
+      <AdditionalIncludeDirectories>..\..\..\..\SampleSharedLib\SampleSharedLib;$(MSBuildThisFileDirectory)../../build/native/include/;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <LanguageStandard>stdcpp14</LanguageStandard>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>runtimeobject.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <SDLCheck>true</SDLCheck>
+      <PreprocessorDefinitions>_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <ConformanceMode>true</ConformanceMode>
+      <AdditionalIncludeDirectories>..\..\..\..\SampleSharedLib\SampleSharedLib;$(MSBuildThisFileDirectory)../../build/native/include/;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <LanguageStandard>stdcpp14</LanguageStandard>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>runtimeobject.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <SDLCheck>true</SDLCheck>
+      <PreprocessorDefinitions>_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <ConformanceMode>true</ConformanceMode>
+      <AdditionalIncludeDirectories>..\..\..\..\SampleSharedLib\SampleSharedLib;$(MSBuildThisFileDirectory)../../build/native/include/;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <LanguageStandard>stdcpp14</LanguageStandard>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>runtimeobject.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+    <ClCompile Include="main.cpp" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\..\SampleSharedLib\SampleSharedLib\SampleSharedLib.vcxproj">
+      <Project>{12103a5b-677a-4286-83d2-54eab9010c16}</Project>
+    </ProjectReference>
+  </ItemGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/SqueezeNetObjectDetectionCPPNonWinRT.vcxproj.filters b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/SqueezeNetObjectDetectionCPPNonWinRT.vcxproj.filters
new file mode 100644
index 00000000..128a3866
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/SqueezeNetObjectDetectionCPPNonWinRT.vcxproj.filters
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Source Files">
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+    </Filter>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hh;hpp;hxx;hm;inl;inc;ipp;xsd</Extensions>
+    </Filter>
+    <Filter Include="Resource Files">
+      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="main.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/main.cpp b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/main.cpp
new file mode 100644
index 00000000..9a216e3b
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/main.cpp
@@ -0,0 +1,68 @@
+#include <windows.h>
+#include <wrl.h>
+#include "Windows.AI.MachineLearning.Native.h"
+#include "Windows.AI.MachineLearning.h"
+#include "raw/windows.ai.machinelearning.h"
+#include "raw/windows.ai.machinelearning.gpu.h"
+#include <cstdio>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+namespace ml = Windows::AI::MachineLearning;
+
+EXTERN_C IMAGE_DOS_HEADER __ImageBase;
+std::wstring GetModulePath() {
+ std::wstring val;
+ wchar_t modulePath[MAX_PATH] = { 0 };
+ GetModuleFileNameW((HINSTANCE)&__ImageBase, modulePath, _countof(modulePath));
+ wchar_t drive[_MAX_DRIVE];
+ wchar_t dir[_MAX_DIR];
+ wchar_t filename[_MAX_FNAME];
+ wchar_t ext[_MAX_EXT];
+ _wsplitpath_s(modulePath, drive, _MAX_DRIVE, dir, _MAX_DIR, filename,
+ _MAX_FNAME, ext, _MAX_EXT);
+
+ val = drive;
+ val += dir;
+
+ return val;
+}
+
+int main()
+{
+ std::wstring model_path = GetModulePath() + L".\\SqueezeNet.onnx";
+
+ // Load the model and create an evaluation device (default device kind).
+ std::unique_ptr<ml::learning_model> model = std::make_unique<ml::learning_model>(model_path.c_str(), model_path.size());
+ std::unique_ptr<ml::learning_model_device> device = std::make_unique<ml::learning_model_device>();
+
+ const wchar_t input_name[] = L"data_0";
+ const wchar_t output_name[] = L"softmaxout_1";
+
+ std::unique_ptr<ml::learning_model_session> session = std::make_unique<ml::learning_model_session>(*model, *device);
+
+ std::unique_ptr<ml::learning_model_binding> binding = std::make_unique<ml::learning_model_binding>(*session.get());
+
+ auto input_shape = std::vector<ml::tensor_shape_type>{ 1, 3, 224, 224 };
+
+ auto input_data = std::vector<float>(1 * 3 * 224 * 224);
+ auto output_shape = std::vector<ml::tensor_shape_type>{ 1, 1000, 1, 1 };
+
+ // Fill the input with a ramp of values and bind it to the model's input feature.
+ std::iota(begin(input_data), end(input_data), 0.f);
+
+ binding->bind(
+ input_name, _countof(input_name) - 1,
+ input_shape.data(), input_shape.size(),
+ input_data.data(), input_data.size());
+
+
+ // Run the model and fetch a pointer to the output buffer.
+ ml::learning_model_results results = session->evaluate(*binding.get());
+ float* p_buffer = nullptr;
+ size_t buffer_size = 0;
+ bool succeeded = 0 == results.get_output(
+ output_name,
+ _countof(output_name) - 1,
+ reinterpret_cast<void**>(&p_buffer),
+ &buffer_size);
+
+ printf("Windows Machine Learning succeeded: %s\n", succeeded ? "TRUE" : "FALSE");
+}
\ No newline at end of file
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/weak_buffer.h b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/weak_buffer.h
new file mode 100644
index 00000000..701e22ab
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/weak_buffer.h
@@ -0,0 +1,69 @@
+// Copyright 2019 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef WEAK_BUFFER_H
+#define WEAK_BUFFER_H
+
+#include <wrl.h>
+#include <wrl/client.h>
+
+#include <windows.storage.streams.h>
+#include <robuffer.h>
+
+namespace Windows { namespace AI { namespace MachineLearning { namespace Details {
+
+template <typename T>
+struct weak_buffer
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::WinRtClassicComMix>,
+ ABI::Windows::Storage::Streams::IBuffer,
+ Windows::Storage::Streams::IBufferByteAccess> {
+private:
+ const T* m_p_begin;
+ const T* m_p_end;
+
+public:
+ HRESULT RuntimeClassInitialize(_In_ const T* p_begin, _In_ const T* p_end) {
+ m_p_begin = p_begin;
+ m_p_end = p_end;
+
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE get_Capacity(
+ UINT32 * value)
+ {
+ if (value == nullptr) {
+ return E_POINTER;
+ }
+
+ *value = static_cast<UINT32>(m_p_end - m_p_begin) * sizeof(T);
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE get_Length(
+ UINT32 * /*value*/)
+ {
+ return E_NOTIMPL;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE put_Length(
+ UINT32 /*value*/)
+ {
+ return E_NOTIMPL;
+ }
+
+ STDMETHOD(Buffer)(uint8_t** value)
+ {
+ if (value == nullptr) {
+ return E_POINTER;
+ }
+
+ *value = reinterpret_cast<uint8_t*>(const_cast<T*>(m_p_begin));
+ return S_OK;
+ }
+};
+
+}}}} // namespace Windows::AI::MachineLearning::Details
+
+#endif // WEAK_BUFFER_H
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/weak_single_threaded_iterable.h b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/weak_single_threaded_iterable.h
new file mode 100644
index 00000000..3312f652
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/weak_single_threaded_iterable.h
@@ -0,0 +1,121 @@
+// Copyright 2019 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef WEAK_SINGLE_THREADED_ITERABLE_H_
+#define WEAK_SINGLE_THREADED_ITERABLE_H_
+
+#include <wrl.h>
+#include <windows.foundation.collections.h>
+
+
+namespace Windows { namespace AI { namespace MachineLearning { namespace Details {
+
+template <typename T>
+struct weak_single_threaded_iterable
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::WinRt>,
+ ABI::Windows::Foundation::Collections::IIterable<T>> {
+private:
+ const T* m_p_begin;
+ const T* m_p_end;
+
+public:
+ HRESULT RuntimeClassInitialize(_In_ const T* p_begin, _In_ const T* p_end) {
+ m_p_begin = p_begin;
+ m_p_end = p_end;
+
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE First(
+ _Outptr_result_maybenull_
+ ABI::Windows::Foundation::Collections::IIterator<T>** first) {
+ if (first == nullptr) {
+ return E_POINTER;
+ }
+
+ Microsoft::WRL::ComPtr<weak_single_threaded_iterator> iterator;
+ auto hr = Microsoft::WRL::MakeAndInitialize<weak_single_threaded_iterator>(
+ &iterator, this);
+
+ if (FAILED(hr)) {
+ return hr;
+ }
+
+ return iterator.CopyTo(first);
+ }
+
+ HRESULT Size(unsigned* p_size) {
+ if (p_size == nullptr) {
+ return E_POINTER;
+ }
+
+ *p_size = static_cast<unsigned>(m_p_end - m_p_begin);
+ return S_OK;
+ }
+
+ HRESULT At(unsigned index, _Out_ T* p_current) {
+ if (p_current == nullptr) {
+ return E_POINTER;
+ }
+
+ *p_current = *(m_p_begin + index);
+ return S_OK;
+ }
+
+ HRESULT Has(unsigned index, _Out_ boolean* p_has_current) {
+ if (p_has_current == nullptr) {
+ return E_POINTER;
+ }
+ unsigned size;
+ auto hr = Size(&size);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ *p_has_current = index < size;
+ return S_OK;
+ }
+
+ struct weak_single_threaded_iterator
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::WinRt>,
+ ABI::Windows::Foundation::Collections::IIterator<T>> {
+ private:
+ Microsoft::WRL::ComPtr<weak_single_threaded_iterable>
+ m_weak_single_threaded_iterable;
+ unsigned m_current = 0;
+
+ public:
+ HRESULT RuntimeClassInitialize(
+ _In_ weak_single_threaded_iterable* p_weak_single_threaded_iterable) {
+ m_weak_single_threaded_iterable = p_weak_single_threaded_iterable;
+ return S_OK;
+ }
+
+ virtual /* propget */ HRESULT STDMETHODCALLTYPE
+ get_Current(_Out_ T* current) {
+ return m_weak_single_threaded_iterable->At(m_current, current);
+ }
+
+ virtual /* propget */ HRESULT STDMETHODCALLTYPE
+ get_HasCurrent(_Out_ boolean* hasCurrent) {
+ return m_weak_single_threaded_iterable->Has(m_current, hasCurrent);
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE MoveNext(_Out_ boolean* hasCurrent) {
+ if (SUCCEEDED(
+ m_weak_single_threaded_iterable->Has(m_current, hasCurrent)) &&
+ *hasCurrent) {
+ m_current++;
+ return m_weak_single_threaded_iterable->Has(m_current, hasCurrent);
+ }
+ return S_OK;
+ }
+ };
+};
+
+}}}} // namespace Windows::AI::MachineLearning::Details
+
+#endif // WEAK_SINGLE_THREADED_ITERABLE_H_
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/windows.ai.machinelearning.gpu.h b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/windows.ai.machinelearning.gpu.h
new file mode 100644
index 00000000..e55dca15
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/windows.ai.machinelearning.gpu.h
@@ -0,0 +1,58 @@
+#pragma once
+
+#ifndef MICROSOFT_AI_MACHINELEARNING_GPU_H_
+#define MICROSOFT_AI_MACHINELEARNING_GPU_H_
+
+#include "windows.ai.machinelearning.h"
+
+namespace Windows { namespace AI { namespace MachineLearning { namespace gpu {
+
+enum directx_device_kind { directx, directx_high_power, directx_min_power };
+
+struct directx_device : public learning_model_device
+{
+ directx_device(directx_device_kind kind) :
+ learning_model_device(create_device(kind))
+ {}
+
+ directx_device(ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice* d3dDevice) :
+ learning_model_device(create_device(d3dDevice))
+ {}
+
+ directx_device(ID3D12CommandQueue* queue) :
+ learning_model_device(create_device(queue))
+ {}
+
+private:
+ static Details::learning_model_device create_device(
+ directx_device_kind kind)
+ {
+ switch (kind)
+ {
+ case directx_device_kind::directx:
+ return Details::learning_model_device::create_directx_device();
+ case directx_device_kind::directx_high_power:
+ return Details::learning_model_device::create_directx_high_power_device();
+ case directx_device_kind::directx_min_power:
+ return Details::learning_model_device::create_directx_min_power_device();
+ };
+
+ return Details::learning_model_device();
+ }
+
+ static Details::learning_model_device create_device(
+ ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice* d3dDevice)
+ {
+ return Details::learning_model_device::create_directx_device(d3dDevice);
+ }
+
+ static Details::learning_model_device create_device(
+ ID3D12CommandQueue* queue)
+ {
+ return Details::learning_model_device::create_directx_device(queue);
+ }
+};
+
+}}}} // namespace Windows::AI::MachineLearning::gpu
+
+#endif // MICROSOFT_AI_MACHINELEARNING_GPU_H_
\ No newline at end of file
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/windows.ai.machinelearning.h b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/windows.ai.machinelearning.h
new file mode 100644
index 00000000..a34b5210
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/windows.ai.machinelearning.h
@@ -0,0 +1,155 @@
+#pragma once
+
+#ifndef WINDOWS_AI_MACHINELEARNING_H_
+#define WINDOWS_AI_MACHINELEARNING_H_
+
+#define ML_FAIL_FAST_IF(condition) \
+ do { \
+ bool _cond = condition; \
+ if (_cond) { \
+ __fastfail(0); \
+ } \
+ } while(0)
+
+namespace Windows { namespace AI { namespace MachineLearning {
+ using tensor_shape_type = int64_t;
+}}} // namespace Windows::AI::MachineLearning
+
+
+#include "winml_windows.h"
+
+namespace Windows { namespace AI { namespace MachineLearning { namespace Details {
+ using learning_model = WinMLLearningModel;
+ using learning_model_device = WinMLLearningModelDevice;
+ using learning_model_session = WinMLLearningModelSession;
+ using learning_model_binding = WinMLLearningModelBinding;
+ using learning_model_results = WinMLLearningModelResults;
+}}}} // namespace Windows::AI::MachineLearning::Details
+
+namespace Windows { namespace AI { namespace MachineLearning {
+
+struct learning_model
+{
+ friend struct learning_model_session;
+
+ learning_model(const wchar_t* model_path, size_t size) :
+ m_model(model_path, size)
+ {}
+
+ learning_model(const char* bytes, size_t size) :
+ m_model(bytes, size)
+ {}
+
+private:
+ Details::learning_model m_model;
+};
+
+struct learning_model_results
+{
+ friend struct learning_model_session;
+
+ int32_t get_output(const wchar_t* feature_name, size_t feature_name_size, void** pp_buffer, size_t* p_capacity)
+ {
+ return m_results.get_output(feature_name, feature_name_size, pp_buffer, p_capacity);
+ }
+
+private:
+ learning_model_results(Details::learning_model_results results) :
+ m_results(results) {}
+
+private:
+ Details::learning_model_results m_results;
+};
+
+struct learning_model_device
+{
+ friend struct learning_model_session;
+
+ learning_model_device() : m_device(){}
+
+ learning_model_device(learning_model_device&& device) :
+ m_device(std::move(device.m_device))
+ {}
+
+ learning_model_device(learning_model_device& device) :
+ m_device(device.m_device)
+ {}
+
+ void operator=(learning_model_device& device)
+ {
+ m_device = device.m_device;
+ }
+
+protected:
+ learning_model_device(Details::learning_model_device&& learning_model_device) :
+ m_device(std::move(learning_model_device)){}
+
+private:
+ Details::learning_model_device m_device;
+};
+
+struct learning_model_session
+{
+ friend struct learning_model_binding;
+
+ learning_model_session(const learning_model& model) :
+ m_session(model.m_model)
+ {}
+
+ learning_model_session(const learning_model& model, const learning_model_device& device) :
+ m_session(model.m_model, device.m_device)
+ {}
+
+ inline learning_model_results evaluate(learning_model_binding& binding);
+
+private:
+ Details::learning_model_session m_session;
+
+};
+
+struct learning_model_binding
+{
+ friend struct learning_model_session;
+
+ learning_model_binding(const learning_model_session& session) :
+ m_binding(session.m_session)
+ {}
+
+ template <typename T>
+ int32_t bind_as_reference(
+ const wchar_t* feature_name, size_t feature_name_size,
+ tensor_shape_type* p_shape, size_t shape_size,
+ T* p_data, size_t data_size)
+ {
+ return m_binding.bind_as_reference(feature_name, feature_name_size, p_shape, shape_size, p_data, data_size);
+ }
+
+ template <typename T>
+ int32_t bind(
+ const wchar_t* feature_name, size_t feature_name_size,
+ tensor_shape_type* p_shape, size_t shape_size,
+ T* p_data, size_t data_size)
+ {
+ return m_binding.bind(feature_name, feature_name_size, p_shape, shape_size, p_data, data_size);
+ }
+
+ template <typename T>
+ int32_t bind(
+ const wchar_t* feature_name, size_t feature_name_size,
+ tensor_shape_type* p_shape, size_t shape_size)
+ {
+ return m_binding.bind(feature_name, feature_name_size, p_shape, shape_size);
+ }
+
+private:
+ Details::learning_model_binding m_binding;
+};
+
+learning_model_results learning_model_session::evaluate(learning_model_binding& binding)
+{
+ return Details::learning_model_results(m_session.evaluate(binding.m_binding));
+}
+
+}}} // namespace Windows::AI::MachineLearning
+
+#endif // WINDOWS_AI_MACHINELEARNING_H_
\ No newline at end of file
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/winml_windows.h b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/winml_windows.h
new file mode 100644
index 00000000..a8b8c262
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/raw/winml_windows.h
@@ -0,0 +1,686 @@
+#pragma once
+
+#ifndef WINML_H_
+#define WINML_H_
+
+#include "weak_buffer.h"
+#include "weak_single_threaded_iterable.h"
+
+#define RETURN_HR_IF_FAILED(expression) \
+ do { \
+ auto _hr = expression; \
+ if (FAILED(_hr)) \
+ { \
+ return static_cast<int32_t>(_hr); \
+ } \
+ } while (0)
+
+
+#define FAIL_FAST_IF_HR_FAILED(expression) \
+ do { \
+ auto _hr = expression; \
+ if (FAILED(_hr)) \
+ { \
+ __fastfail(static_cast<unsigned int>(_hr)); \
+ } \
+ } while (0)
+
+
+struct float16 {
+ uint16_t value;
+};
+
+namespace Windows { namespace AI { namespace MachineLearning { namespace Details {
+
+class WinMLLearningModel;
+class WinMLLearningModelBinding;
+class WinMLLearningModelSession;
+class WinMLLearningModelResults;
+
+extern const __declspec(selectany) _Null_terminated_ wchar_t MachineLearningDll[] = L"windows.ai.machinelearning.dll";
+
+template <typename T> struct Tensor { };
+template <> struct Tensor<float> { using Type = ABI::Windows::AI::MachineLearning::ITensorFloat; };
+template <> struct Tensor<float16> { using Type = ABI::Windows::AI::MachineLearning::ITensorFloat16Bit; };
+template <> struct Tensor<int8_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorInt8Bit; };
+template <> struct Tensor<uint8_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorUInt8Bit; };
+template <> struct Tensor<uint16_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorUInt16Bit; };
+template <> struct Tensor<int16_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorInt16Bit; };
+template <> struct Tensor<uint32_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorUInt32Bit; };
+template <> struct Tensor<int32_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorInt32Bit; };
+template <> struct Tensor<uint64_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorUInt64Bit; };
+template <> struct Tensor<int64_t> { using Type = ABI::Windows::AI::MachineLearning::ITensorInt64Bit; };
+template <> struct Tensor<bool> { using Type = ABI::Windows::AI::MachineLearning::ITensorBoolean; };
+template <> struct Tensor<double> { using Type = ABI::Windows::AI::MachineLearning::ITensorDouble; };
+
+template <typename T> struct TensorRuntimeClassID { };
+template <> struct TensorRuntimeClassID<float> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<float16> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<int8_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<uint8_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<uint16_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<int16_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<uint32_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<int32_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<uint64_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<int64_t> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<bool> { static const wchar_t* RuntimeClass_ID; };
+template <> struct TensorRuntimeClassID<double> { static const wchar_t* RuntimeClass_ID; };
+
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<float>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorFloat;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<float16>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorFloat16Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<int8_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorInt8Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<uint8_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorUInt8Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<uint16_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorUInt16Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<int16_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorInt16Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<uint32_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorUInt32Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<int32_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorInt32Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<uint64_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorUInt64Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<int64_t>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorInt64Bit;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<bool>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorBoolean;
+__declspec(selectany) const wchar_t* TensorRuntimeClassID<double>::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorDouble;
+
+template <typename T> struct TensorFactory { };
+template <> struct TensorFactory<float> { using Factory = ABI::Windows::AI::MachineLearning::ITensorFloatStatics; };
+template <> struct TensorFactory<float16> { using Factory = ABI::Windows::AI::MachineLearning::ITensorFloat16BitStatics; };
+template <> struct TensorFactory<int8_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt8BitStatics; };
+template <> struct TensorFactory<uint8_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt8BitStatics; };
+template <> struct TensorFactory<uint16_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt16BitStatics; };
+template <> struct TensorFactory<int16_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt16BitStatics; };
+template <> struct TensorFactory<uint32_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt32BitStatics; };
+template <> struct TensorFactory<int32_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt32BitStatics; };
+template <> struct TensorFactory<uint64_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt64BitStatics; };
+template <> struct TensorFactory<int64_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt64BitStatics; };
+template <> struct TensorFactory<bool> { using Factory = ABI::Windows::AI::MachineLearning::ITensorBooleanStatics; };
+template <> struct TensorFactory<double> { using Factory = ABI::Windows::AI::MachineLearning::ITensorDoubleStatics; };
+
+
+template <typename T> struct TensorFactory2 { };
+template <> struct TensorFactory2<float> { using Factory = ABI::Windows::AI::MachineLearning::ITensorFloatStatics2; };
+template <> struct TensorFactory2<float16> { using Factory = ABI::Windows::AI::MachineLearning::ITensorFloat16BitStatics2; };
+template <> struct TensorFactory2<int8_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt8BitStatics2; };
+template <> struct TensorFactory2<uint8_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt8BitStatics2; };
+template <> struct TensorFactory2<uint16_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt16BitStatics2; };
+template <> struct TensorFactory2<int16_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt16BitStatics2; };
+template <> struct TensorFactory2<uint32_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt32BitStatics2; };
+template <> struct TensorFactory2<int32_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt32BitStatics2; };
+template <> struct TensorFactory2<uint64_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorUInt64BitStatics2; };
+template <> struct TensorFactory2<int64_t> { using Factory = ABI::Windows::AI::MachineLearning::ITensorInt64BitStatics2; };
+template <> struct TensorFactory2<bool> { using Factory = ABI::Windows::AI::MachineLearning::ITensorBooleanStatics2; };
+template <> struct TensorFactory2<double> { using Factory = ABI::Windows::AI::MachineLearning::ITensorDoubleStatics2; };
+
+
+
+template <typename T> struct TensorFactoryIID { };
+template <> struct TensorFactoryIID<float> { static const GUID IID; };
+template <> struct TensorFactoryIID<float16> { static const GUID IID; };
+template <> struct TensorFactoryIID<int8_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<uint8_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<uint16_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<int16_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<uint32_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<int32_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<uint64_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<int64_t> { static const GUID IID; };
+template <> struct TensorFactoryIID<bool> { static const GUID IID; };
+template <> struct TensorFactoryIID<double> { static const GUID IID; };
+
+__declspec(selectany) const GUID TensorFactoryIID<float>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloatStatics;
+__declspec(selectany) const GUID TensorFactoryIID<float16>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloat16BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<int8_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt8BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<uint8_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt8BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<uint16_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt16BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<int16_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt16BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<uint32_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt32BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<int32_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt32BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<uint64_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt64BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<int64_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt64BitStatics;
+__declspec(selectany) const GUID TensorFactoryIID<bool>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorBooleanStatics;
+__declspec(selectany) const GUID TensorFactoryIID<double>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorDoubleStatics;
+
+template <typename T> struct TensorFactory2IID { };
+template <> struct TensorFactory2IID<float> { static const GUID IID; };
+template <> struct TensorFactory2IID<float16> { static const GUID IID; };
+template <> struct TensorFactory2IID<int8_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<uint8_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<uint16_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<int16_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<uint32_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<int32_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<uint64_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<int64_t> { static const GUID IID; };
+template <> struct TensorFactory2IID<bool> { static const GUID IID; };
+template <> struct TensorFactory2IID<double> { static const GUID IID; };
+
+__declspec(selectany) const GUID TensorFactory2IID<float>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloatStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<float16>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloat16BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<int8_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt8BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<uint8_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt8BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<uint16_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt16BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<int16_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt16BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<uint32_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt32BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<int32_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt32BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<uint64_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt64BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<int64_t>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt64BitStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<bool>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorBooleanStatics2;
+__declspec(selectany) const GUID TensorFactory2IID<double>::IID = ABI::Windows::AI::MachineLearning::IID_ITensorDoubleStatics2;
+
+
+inline HRESULT GetActivationFactory(
+ const wchar_t* p_class_id,
+ const IID& iid,
+ void** factory) noexcept
+{
+ // Fallback to OS binary if the redistributable is not present!
+ auto library = LoadLibraryExW(MachineLearningDll, nullptr, 0);
+
+ using DllGetActivationFactory = HRESULT __stdcall(HSTRING, void** factory);
+ auto call = reinterpret_cast<DllGetActivationFactory*>(GetProcAddress(library, "DllGetActivationFactory"));
+ if (!call)
+ {
+ auto hr = HRESULT_FROM_WIN32(GetLastError());
+ FreeLibrary(library);
+ return hr;
+ }
+
+ Microsoft::WRL::ComPtr<IActivationFactory> activation_factory;
+ auto hr = call(
+ Microsoft::WRL::Wrappers::HStringReference(p_class_id, static_cast<UINT32>(wcslen(p_class_id))).Get(),
+ reinterpret_cast<void**>(activation_factory.GetAddressOf()));
+
+ if (FAILED(hr))
+ {
+ FreeLibrary(library);
+ return hr;
+ }
+
+ return activation_factory->QueryInterface(iid, factory);
+}
+
+class WinMLLearningModel
+{
+ friend class WinMLLearningModelSession;
+
+public:
+ WinMLLearningModel(const wchar_t* model_path, size_t size)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(model_path, size));
+ }
+
+ WinMLLearningModel(const char* bytes, size_t size)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(bytes, size));
+ }
+
+private:
+ int32_t Initialize(const wchar_t* model_path, size_t size)
+ {
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelStatics> learningModel;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ RuntimeClass_Windows_AI_MachineLearning_LearningModel,
+ ABI::Windows::AI::MachineLearning::IID_ILearningModelStatics,
+ &learningModel));
+
+ RETURN_HR_IF_FAILED(
+ learningModel->LoadFromFilePath(
+ Microsoft::WRL::Wrappers::HStringReference(model_path, static_cast<UINT32>(size)).Get(),
+ m_learning_model.GetAddressOf()));
+ return 0;
+ }
+
+ struct StoreCompleted :
+ Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::ClassicCom>,
+ ABI::Windows::Foundation::IAsyncOperationCompletedHandler<UINT32>>
+ {
+ HANDLE completed_event_;
+
+ StoreCompleted() :
+ completed_event_(CreateEvent(nullptr, true, false, nullptr))
+ {}
+
+ ~StoreCompleted()
+ {
+ CloseHandle(completed_event_);
+ }
+
+ HRESULT STDMETHODCALLTYPE Invoke(
+ ABI::Windows::Foundation::IAsyncOperation<UINT32> *asyncInfo,
+ ABI::Windows::Foundation::AsyncStatus status)
+ {
+ SetEvent(completed_event_);
+ return S_OK;
+ }
+
+ HRESULT Wait()
+ {
+ WaitForSingleObject(completed_event_, INFINITE);
+ return S_OK;
+ }
+ };
+
+ int32_t Initialize(const char* bytes, size_t size)
+ {
+ RoInitialize(RO_INIT_TYPE::RO_INIT_SINGLETHREADED);
+
+ // Create in memory stream
+ Microsoft::WRL::ComPtr<IInspectable> in_memory_random_access_stream_insp;
+ RETURN_HR_IF_FAILED(RoActivateInstance(
+ Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Storage_Streams_InMemoryRandomAccessStream).Get(),
+ in_memory_random_access_stream_insp.GetAddressOf()));
+
+ // QI memory stream to output stream
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IOutputStream> output_stream;
+ RETURN_HR_IF_FAILED(in_memory_random_access_stream_insp.As(&output_stream));
+
+ // Create data writer factory
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IDataWriterFactory> activation_factory;
+ RETURN_HR_IF_FAILED(RoGetActivationFactory(
+ Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Storage_Streams_DataWriter).Get(),
+ IID_PPV_ARGS(activation_factory.GetAddressOf())));
+
+ // Create data writer object based on the in memory stream
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IDataWriter> data_writer;
+ RETURN_HR_IF_FAILED(activation_factory->CreateDataWriter(
+ output_stream.Get(),
+ data_writer.GetAddressOf()));
+
+ // Write the model to the data writer and thus to the stream
+ RETURN_HR_IF_FAILED(
+ data_writer->WriteBytes(static_cast<UINT32>(size), reinterpret_cast<BYTE*>(const_cast<char*>(bytes))));
+
+ // QI the in memory stream to a random access stream
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStream> random_access_stream;
+ RETURN_HR_IF_FAILED(in_memory_random_access_stream_insp.As(&random_access_stream));
+
+ // Create a random access stream reference factory
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamReferenceStatics> random_access_stream_ref_statics;
+ RETURN_HR_IF_FAILED(RoGetActivationFactory(
+ Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Storage_Streams_RandomAccessStreamReference).Get(),
+ IID_PPV_ARGS(random_access_stream_ref_statics.GetAddressOf())));
+
+ // Create a random access stream reference from the random access stream view on top of
+ // the in memory stream
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IRandomAccessStreamReference> random_access_stream_ref;
+ RETURN_HR_IF_FAILED(random_access_stream_ref_statics->CreateFromStream(
+ random_access_stream.Get(),
+ random_access_stream_ref.GetAddressOf()));
+
+ // Create a learning model factory
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelStatics> learning_model;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ RuntimeClass_Windows_AI_MachineLearning_LearningModel,
+ ABI::Windows::AI::MachineLearning::IID_ILearningModelStatics,
+ &learning_model));
+
+ Microsoft::WRL::ComPtr<ABI::Windows::Foundation::IAsyncOperation<UINT32>> async_operation;
+ RETURN_HR_IF_FAILED(data_writer->StoreAsync(&async_operation));
+ auto store_completed_handler = Microsoft::WRL::Make<StoreCompleted>();
+ RETURN_HR_IF_FAILED(async_operation->put_Completed(store_completed_handler.Get()));
+ RETURN_HR_IF_FAILED(store_completed_handler->Wait());
+
+ // Create a learning model from the factory with the random access stream reference that points
+ // to the random access stream view on top of the in memory stream copy of the model
+ RETURN_HR_IF_FAILED(
+ learning_model->LoadFromStream(
+ random_access_stream_ref.Get(),
+ m_learning_model.GetAddressOf()));
+
+ return 0;
+ }
+
+private:
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModel> m_learning_model;
+};
+
+class WinMLLearningModelResults
+{
+ friend class WinMLLearningModelSession;
+
+public:
+ int32_t get_output(
+ const wchar_t* feature_name,
+ size_t feature_name_size,
+ void** pp_buffer,
+ size_t* p_capacity)
+ {
+ Microsoft::WRL::ComPtr<ABI::Windows::Foundation::Collections::IMapView<HSTRING, IInspectable*>> output_map;
+ RETURN_HR_IF_FAILED(m_result->get_Outputs(&output_map));
+
+ Microsoft::WRL::ComPtr<IInspectable> inspectable;
+ RETURN_HR_IF_FAILED(output_map->Lookup(
+ Microsoft::WRL::Wrappers::HStringReference(feature_name, static_cast<UINT32>(feature_name_size)).Get(),
+ inspectable.GetAddressOf()));
+
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ITensorFloat> output_feature_value;
+ RETURN_HR_IF_FAILED(inspectable.As(&output_feature_value));
+
+ Microsoft::WRL::ComPtr<ITensorNative> native_tensor_float_feature_value;
+ RETURN_HR_IF_FAILED(output_feature_value.As(&native_tensor_float_feature_value));
+
+ uint32_t size;
+ RETURN_HR_IF_FAILED(native_tensor_float_feature_value->GetBuffer(reinterpret_cast<BYTE**>(pp_buffer), &size));
+ *p_capacity = size;
+
+ return 0;
+ }
+
+private:
+ WinMLLearningModelResults(ABI::Windows::AI::MachineLearning::ILearningModelEvaluationResult* p_result)
+ {
+ m_result = p_result;
+ }
+
+private:
+ Microsoft::WRL::ComPtr< ABI::Windows::AI::MachineLearning::ILearningModelEvaluationResult> m_result;
+};
+
+class WinMLLearningModelBinding
+{
+ friend class WinMLLearningModelSession;
+
+public:
+ WinMLLearningModelBinding(const WinMLLearningModelSession& session)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(session));
+ }
+
+ template <typename T>
+ int32_t bind(
+ const wchar_t* feature_name, size_t feature_name_size,
+ tensor_shape_type* p_shape, size_t shape_size,
+ T* p_data, size_t data_size)
+ {
+ using ITensor = typename Tensor<T>::Type;
+ using ITensorFactory = typename TensorFactory<T>::Factory;
+
+ Microsoft::WRL::ComPtr<ITensorFactory> tensor_factory;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ TensorRuntimeClassID<T>::RuntimeClass_ID,
+ TensorFactoryIID<T>::IID,
+ &tensor_factory));
+
+ Microsoft::WRL::ComPtr<weak_single_threaded_iterable<int64_t>> input_shape_iterable;
+ RETURN_HR_IF_FAILED(
+ Microsoft::WRL::MakeAndInitialize<weak_single_threaded_iterable<int64_t>>(
+ &input_shape_iterable, p_shape, p_shape + shape_size));
+
+ Microsoft::WRL::ComPtr<ITensor> tensor;
+ RETURN_HR_IF_FAILED(
+ tensor_factory->CreateFromArray(
+ input_shape_iterable.Get(),
+ static_cast<UINT32>(data_size),
+ p_data,
+ tensor.GetAddressOf()));
+
+ Microsoft::WRL::ComPtr<IInspectable> inspectable_tensor;
+ RETURN_HR_IF_FAILED(tensor.As(&inspectable_tensor));
+
+ RETURN_HR_IF_FAILED(
+ m_learning_model_binding->Bind(
+ Microsoft::WRL::Wrappers::HStringReference(feature_name, static_cast<UINT32>(feature_name_size)).Get(),
+ inspectable_tensor.Get()));
+ return 0;
+ }
+
+ template <typename T>
+ int32_t bind(
+ const wchar_t* /*feature_name*/, size_t /*feature_name_size*/,
+ tensor_shape_type* /*p_shape*/, size_t /*shape_size*/)
+ {
+ return 0;
+ }
+
+ template <typename T>
+ int32_t bind_as_reference(
+ const wchar_t* feature_name, size_t feature_name_size,
+ tensor_shape_type* p_shape, size_t shape_size,
+ T* p_data, size_t data_size)
+ {
+ using ITensor = typename Tensor<T>::Type;
+ using ITensorFactory = typename TensorFactory2<T>::Factory;
+
+ Microsoft::WRL::ComPtr<ITensorFactory> tensor_factory;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ TensorRuntimeClassID<T>::RuntimeClass_ID,
+ TensorFactory2IID<T>::IID,
+ &tensor_factory));
+
+ Microsoft::WRL::ComPtr<weak_buffer<T>> buffer;
+ RETURN_HR_IF_FAILED(
+ Microsoft::WRL::MakeAndInitialize<weak_buffer<T>>(
+ &buffer, p_data, p_data + data_size));
+
+ Microsoft::WRL::ComPtr<ITensor> tensor;
+ RETURN_HR_IF_FAILED(
+ tensor_factory->CreateFromBuffer(
+ static_cast<UINT32>(shape_size),
+ p_shape,
+ buffer.Get(),
+ tensor.GetAddressOf()));
+
+ Microsoft::WRL::ComPtr<IInspectable> inspectable_tensor;
+ RETURN_HR_IF_FAILED(tensor.As(&inspectable_tensor));
+
+ RETURN_HR_IF_FAILED(
+ m_learning_model_binding->Bind(
+ Microsoft::WRL::Wrappers::HStringReference(feature_name, static_cast<UINT32>(feature_name_size)).Get(),
+ inspectable_tensor.Get()));
+ return 0;
+ }
+
+private:
+ inline int32_t Initialize(const WinMLLearningModelSession& session);
+
+private:
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelBinding> m_learning_model_binding;
+};
+
+class WinMLLearningModelDevice
+{
+ friend class WinMLLearningModelSession;
+
+public:
+ WinMLLearningModelDevice() :
+ WinMLLearningModelDevice(ABI::Windows::AI::MachineLearning::LearningModelDeviceKind_Default)
+ {}
+
+ WinMLLearningModelDevice(WinMLLearningModelDevice&& device) :
+ m_learning_model_device(std::move(device.m_learning_model_device))
+ {}
+
+ WinMLLearningModelDevice(const WinMLLearningModelDevice& device) :
+ m_learning_model_device(device.m_learning_model_device)
+ {}
+
+ void operator=(const WinMLLearningModelDevice& device)
+ {
+ m_learning_model_device = device.m_learning_model_device;
+ }
+
+ WinMLLearningModelDevice(ABI::Windows::AI::MachineLearning::LearningModelDeviceKind kind)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(kind));
+ }
+
+ WinMLLearningModelDevice(ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice* d3dDevice)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(d3dDevice));
+ }
+
+ WinMLLearningModelDevice(ID3D12CommandQueue* queue)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(queue));
+ }
+
+ static WinMLLearningModelDevice create_cpu_device()
+ {
+ return WinMLLearningModelDevice(ABI::Windows::AI::MachineLearning::LearningModelDeviceKind_Cpu);
+ }
+
+ static WinMLLearningModelDevice create_directx_device()
+ {
+ return WinMLLearningModelDevice(ABI::Windows::AI::MachineLearning::LearningModelDeviceKind_DirectX);
+ }
+
+ static WinMLLearningModelDevice create_directx_high_power_device()
+ {
+ return WinMLLearningModelDevice(ABI::Windows::AI::MachineLearning::LearningModelDeviceKind_DirectXHighPerformance);
+ }
+
+ static WinMLLearningModelDevice create_directx_min_power_device()
+ {
+ return WinMLLearningModelDevice(ABI::Windows::AI::MachineLearning::LearningModelDeviceKind_DirectXMinPower);
+ }
+
+ static WinMLLearningModelDevice create_directx_device(ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice* d3dDevice)
+ {
+ return WinMLLearningModelDevice(d3dDevice);
+ }
+
+ static WinMLLearningModelDevice create_directx_device(ID3D12CommandQueue* queue)
+ {
+ return WinMLLearningModelDevice(queue);
+ }
+
+private:
+ int32_t Initialize(ABI::Windows::AI::MachineLearning::LearningModelDeviceKind kind)
+ {
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelDeviceFactory>
+ learning_model_device_factory;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ RuntimeClass_Windows_AI_MachineLearning_LearningModelDevice,
+ ABI::Windows::AI::MachineLearning::IID_ILearningModelDeviceFactory,
+ &learning_model_device_factory));
+
+ RETURN_HR_IF_FAILED(learning_model_device_factory->Create(kind, &m_learning_model_device));
+
+ return 0;
+ }
+
+ int32_t Initialize(ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice* d3dDevice)
+ {
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelDeviceStatics>
+ learning_model_device_factory;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ RuntimeClass_Windows_AI_MachineLearning_LearningModelDevice,
+ ABI::Windows::AI::MachineLearning::IID_ILearningModelDeviceStatics,
+ &learning_model_device_factory));
+
+ RETURN_HR_IF_FAILED(learning_model_device_factory->CreateFromDirect3D11Device(d3dDevice, &m_learning_model_device));
+
+ return 0;
+ }
+
+ int32_t Initialize(ID3D12CommandQueue* queue)
+ {
+ Microsoft::WRL::ComPtr<ILearningModelDeviceFactoryNative>
+ learning_model_device_factory;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ RuntimeClass_Windows_AI_MachineLearning_LearningModelDevice,
+ __uuidof(ILearningModelDeviceFactoryNative),
+ &learning_model_device_factory));
+
+ RETURN_HR_IF_FAILED(learning_model_device_factory->CreateFromD3D12CommandQueue(queue, &m_learning_model_device));
+
+ return 0;
+ }
+
+private:
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelDevice> m_learning_model_device;
+};
+
+class WinMLLearningModelSession
+{
+ friend class WinMLLearningModelBinding;
+
+public:
+ using Model = WinMLLearningModel;
+ using Device = WinMLLearningModelDevice;
+
+public:
+ WinMLLearningModelSession(const Model& model)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(model, Device()));
+ }
+
+ WinMLLearningModelSession(const Model& model, const Device& device)
+ {
+ ML_FAIL_FAST_IF(0 != Initialize(model, device));
+ }
+
+ WinMLLearningModelResults evaluate(WinMLLearningModelBinding& binding)
+ {
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelEvaluationResult>
+ m_learning_model_evaluation_result;
+
+ FAIL_FAST_IF_HR_FAILED(
+ m_learning_model_session->Evaluate(
+ binding.m_learning_model_binding.Get(),
+ nullptr,
+ m_learning_model_evaluation_result.GetAddressOf()));
+
+ return WinMLLearningModelResults(m_learning_model_evaluation_result.Get());
+ }
+
+private:
+ int32_t Initialize(const Model& model, const Device& device)
+ {
+ // {0f6b881d-1c9b-47b6-bfe0-f1cf62a67579}
+ static const GUID IID_ILearningModelSessionFactory =
+ { 0x0f6b881d, 0x1c9b, 0x47b6, { 0xbf, 0xe0, 0xf1, 0xcf, 0x62, 0xa6, 0x75, 0x79 } };
+
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelSessionFactory>
+ m_learning_model_session_factory;
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ RuntimeClass_Windows_AI_MachineLearning_LearningModelSession,
+ IID_ILearningModelSessionFactory,
+ &m_learning_model_session_factory));
+
+ RETURN_HR_IF_FAILED(
+ m_learning_model_session_factory->CreateFromModelOnDevice(
+ model.m_learning_model.Get(),
+ device.m_learning_model_device.Get(),
+ m_learning_model_session.GetAddressOf()));
+
+ return 0;
+ }
+
+private:
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelSession> m_learning_model_session;
+};
+
+inline int32_t WinMLLearningModelBinding::Initialize(const WinMLLearningModelSession& session)
+{
+ // {c95f7a7a-e788-475e-8917-23aa381faf0b}
+ static const GUID IID_ILearningModelBindingFactory =
+ { 0xc95f7a7a, 0xe788, 0x475e, { 0x89, 0x17, 0x23, 0xaa, 0x38, 0x1f, 0xaf, 0x0b } };
+
+ Microsoft::WRL::ComPtr<ABI::Windows::AI::MachineLearning::ILearningModelBindingFactory>
+ learning_model_binding_factory;
+
+ RETURN_HR_IF_FAILED(
+ GetActivationFactory(
+ RuntimeClass_Windows_AI_MachineLearning_LearningModelBinding,
+ IID_ILearningModelBindingFactory,
+ &learning_model_binding_factory));
+
+ RETURN_HR_IF_FAILED(
+ learning_model_binding_factory->CreateFromSession(
+ session.m_learning_model_session.Get(),
+ m_learning_model_binding.GetAddressOf()));
+
+ return 0;
+}
+
+}}}} // namespace Windows::AI::MachineLearning::Details
+
+#endif // WINML_H_
\ No newline at end of file
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/readme.md b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/readme.md
new file mode 100644
index 00000000..69eba7ba
--- /dev/null
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/SqueezeNetObjectDetectionCPPNonWinRT/readme.md
@@ -0,0 +1 @@
+This sample shows how to use Windows Machine Learning without the use of C++/WinRT. The winml_windows.h header exposes a wrapper API on top of the Windows ABI header interfaces; main.cpp loads SqueezeNet.onnx and runs a single evaluation through that wrapper.
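+
+A minimal sketch of the wrapper API in use (it mirrors main.cpp in this folder; the feature names and the 1x3x224x224 input shape assume the SqueezeNet.onnx model that ships with these samples, the model file is assumed to sit next to the executable, and error handling is omitted):
+
+```cpp
+#include <cwchar>
+#include <numeric>
+#include <vector>
+
+#include "Windows.AI.MachineLearning.Native.h"
+#include "Windows.AI.MachineLearning.h"
+#include "raw/windows.ai.machinelearning.h"
+
+namespace ml = Windows::AI::MachineLearning;
+
+int main()
+{
+    // Load the model; the constructor takes the path and its length in characters.
+    ml::learning_model model(L"SqueezeNet.onnx", wcslen(L"SqueezeNet.onnx"));
+    ml::learning_model_device device;                  // default device kind (lets WinML pick)
+    ml::learning_model_session session(model, device);
+    ml::learning_model_binding binding(session);
+
+    // Bind the input feature by name, passing its shape and a flat float buffer.
+    std::vector<ml::tensor_shape_type> shape = { 1, 3, 224, 224 };
+    std::vector<float> data(1 * 3 * 224 * 224);
+    std::iota(data.begin(), data.end(), 0.f);
+    binding.bind(L"data_0", wcslen(L"data_0"), shape.data(), shape.size(), data.data(), data.size());
+
+    // Evaluate and read back a pointer to the output scores (1x1000x1x1 for SqueezeNet).
+    ml::learning_model_results results = session.evaluate(binding);
+    float* p_buffer = nullptr;
+    size_t buffer_size = 0;
+    return results.get_output(L"softmaxout_1", wcslen(L"softmaxout_1"), reinterpret_cast<void**>(&p_buffer), &buffer_size);
+}
+```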
\ No newline at end of file
diff --git a/Samples/SqueezeNetObjectDetection/Desktop/cpp/pch.h b/Samples/SqueezeNetObjectDetection/Desktop/cpp/pch.h
index 4c14c070..d63b5852 100644
--- a/Samples/SqueezeNetObjectDetection/Desktop/cpp/pch.h
+++ b/Samples/SqueezeNetObjectDetection/Desktop/cpp/pch.h
@@ -28,6 +28,7 @@
#include
#include
#include
+#include
using convert_type = std::codecvt_utf8<wchar_t>;
using wstring_to_utf8 = std::wstring_convert<convert_type, wchar_t>;
diff --git a/Samples/SqueezeNetObjectDetection/SqueezeNetObjectDetection.sln b/Samples/SqueezeNetObjectDetection/SqueezeNetObjectDetection.sln
index bd17b015..7216fce8 100644
--- a/Samples/SqueezeNetObjectDetection/SqueezeNetObjectDetection.sln
+++ b/Samples/SqueezeNetObjectDetection/SqueezeNetObjectDetection.sln
@@ -16,6 +16,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SqueezeNetObjectDetectionNC
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SampleSharedLib", "..\SampleSharedLib\SampleSharedLib\SampleSharedLib.vcxproj", "{12103A5B-677A-4286-83D2-54EAB9010C16}"
EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SqueezeNetObjectDetectionCPPNonWinRT", "Desktop\cpp\SqueezeNetObjectDetectionCPPNonWinRT\SqueezeNetObjectDetectionCPPNonWinRT.vcxproj", "{73B73100-4B52-4073-A4AA-289158526A19}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -112,6 +114,18 @@ Global
{12103A5B-677A-4286-83D2-54EAB9010C16}.Release|x64.Build.0 = Release|x64
{12103A5B-677A-4286-83D2-54EAB9010C16}.Release|x86.ActiveCfg = Release|Win32
{12103A5B-677A-4286-83D2-54EAB9010C16}.Release|x86.Build.0 = Release|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Debug|ARM.ActiveCfg = Debug|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Debug|x64.ActiveCfg = Debug|x64
+ {73B73100-4B52-4073-A4AA-289158526A19}.Debug|x64.Build.0 = Debug|x64
+ {73B73100-4B52-4073-A4AA-289158526A19}.Debug|x86.ActiveCfg = Debug|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Debug|x86.Build.0 = Debug|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Release|Any CPU.ActiveCfg = Release|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Release|ARM.ActiveCfg = Release|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Release|x64.ActiveCfg = Release|x64
+ {73B73100-4B52-4073-A4AA-289158526A19}.Release|x64.Build.0 = Release|x64
+ {73B73100-4B52-4073-A4AA-289158526A19}.Release|x86.ActiveCfg = Release|Win32
+ {73B73100-4B52-4073-A4AA-289158526A19}.Release|x86.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE