Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

8.1 Release #2394

Merged
merged 1 commit into from
Nov 20, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
29 changes: 28 additions & 1 deletion .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,7 @@ test_py310_coremltools_test:
PYTHON: "3.10"
REQUIREMENTS: reqs/test.pip

test_py310_pytorch:
test_py310_pytorch_script:
<<: *test_macos_pkg
tags:
- macOS_M1
Expand All @@ -201,6 +201,33 @@ test_py310_pytorch:
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/*cp310*11*
REQUIREMENTS: reqs/test.pip
TORCH_FRONTENDS: TORCHSCRIPT

# Runs the torch frontend test suite against the torch.export (TORCHEXPORT)
# frontend on an Apple Silicon macOS runner, using the Python 3.10 wheel
# produced by build_wheel_macos_py310.
test_py310_pytorch_export:
<<: *test_macos_pkg
tags:
- macOS_M1
dependencies:
- build_wheel_macos_py310
variables:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/*cp310*11*
REQUIREMENTS: reqs/test.pip
TORCH_FRONTENDS: TORCHEXPORT

# Runs the torch frontend test suite against the ExecuTorch (EXECUTORCH)
# frontend on an Apple Silicon macOS runner, using the Python 3.10 wheel
# produced by build_wheel_macos_py310.
test_py310_pytorch_executorch:
<<: *test_macos_pkg
tags:
- macOS_M1
dependencies:
- build_wheel_macos_py310
variables:
PYTHON: "3.10"
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/*cp310*11*
REQUIREMENTS: reqs/test.pip
TORCH_FRONTENDS: EXECUTORCH

test_py310_tf2-1:
<<: *test_macos_pkg
Expand Down
4 changes: 2 additions & 2 deletions BUILDING.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ Follow these steps:
1. Fork and clone the GitHub [coremltools repository](https://github.com/apple/coremltools).

2. Run the [build.sh](scripts/build.sh) script to build `coremltools`.
* By default this script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`) as a argument to change the Python version.
* By default this script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`) as an argument to change the Python version.
* The script creates a new `build` folder with the coremltools distribution, and a `dist` folder with Python wheel files.

3. Run the [test.sh](scripts/test.sh) script to test the build.
Expand All @@ -45,7 +45,7 @@ The following build targets help you configure the development environment. If y
* `test_slow` | Run all non-fast tests.
* `wheel` | Build wheels in release mode.

The script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`) as a argument to change the Python version.
The script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`) as an argument to change the Python version.

## Resources

Expand Down
187 changes: 181 additions & 6 deletions coremlpython/CoreMLPython.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@
#pragma clang diagnostic pop

#import <CoreML/CoreML.h>
#import <Availability.h>

#import <vector>

#ifndef BUILT_WITH_MACOS15_SDK
#define BUILT_WITH_MACOS15_SDK \
Expand All @@ -28,25 +30,181 @@
#pragma message ("Building without macOS 15 SDK")
#endif

#if !defined(ML_COMPUTE_PLAN_IS_AVAILABLE) && __has_include(<CoreML/MLComputePlan.h>)
#define ML_COMPUTE_PLAN_IS_AVAILABLE 1
#endif

#if !defined(ML_MODEL_STRUCTURE_IS_AVAILABLE) && __has_include(<CoreML/MLModelStructure.h>)
#define ML_MODEL_STRUCTURE_IS_AVAILABLE 1
#endif

#if !defined(ML_COMPUTE_DEVICE_IS_AVAILABLE) && __has_include(<CoreML/MLComputeDeviceProtocol.h>)
#define ML_COMPUTE_DEVICE_IS_AVAILABLE 1
#endif

#if !defined(ML_MODEL_ASSET_IS_AVAILABLE) && __has_include(<CoreML/MLModelAsset.h>)
#define ML_MODEL_ASSET_IS_AVAILABLE 1
#endif

#if !defined(ML_STATE_IS_AVAILABLE) && __has_include(<CoreML/MLState.h>)
#define ML_STATE_IS_AVAILABLE 1
#endif


namespace py = pybind11;

namespace CoreML {
namespace Python {


struct State {
#if BUILT_WITH_MACOS15_SDK
// MLState must be wrapped in a C++ class for PyBind.
MLState* m_state = nil;
inline State(id impl):
m_impl(impl) {}

#if ML_STATE_IS_AVAILABLE
API_AVAILABLE(macos(15.0))
inline MLState *getImpl() const {
return (MLState *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

/// Pybind-friendly C++ wrapper around MLCPUComputeDevice.
///
/// The Objective-C object is held as a type-erased `id` so this header still
/// compiles against SDKs where the MLComputeDevice API (macOS 14.0+) is
/// unavailable; `getImpl()` restores the concrete type and is only declared
/// when the SDK provides it.
struct CPUComputeDevice {
// MLCPUComputeDevice must be wrapped in a C++ class for PyBind.
inline CPUComputeDevice(id impl):
m_impl(impl) {}

#if ML_COMPUTE_DEVICE_IS_AVAILABLE
API_AVAILABLE(macos(14.0))
/// Returns the wrapped object, cast back to its concrete CoreML type.
inline MLCPUComputeDevice *getImpl() const {
return (MLCPUComputeDevice *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

/// Pybind-friendly C++ wrapper around MLGPUComputeDevice.
///
/// Holds the Objective-C object as a type-erased `id` so the header compiles
/// on SDKs without the MLComputeDevice API (macOS 14.0+); `getImpl()` restores
/// the concrete type when the SDK provides it.
struct GPUComputeDevice {
// MLGPUComputeDevice must be wrapped in a C++ class for PyBind.
inline GPUComputeDevice(id impl):
m_impl(impl) {}

#if ML_COMPUTE_DEVICE_IS_AVAILABLE
API_AVAILABLE(macos(14.0))
/// Returns the wrapped object, cast back to its concrete CoreML type.
inline MLGPUComputeDevice *getImpl() const {
return (MLGPUComputeDevice *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

/// Pybind-friendly C++ wrapper around MLNeuralEngineComputeDevice.
///
/// Holds the Objective-C object as a type-erased `id` so the header compiles
/// on SDKs without the MLComputeDevice API (macOS 14.0+).
struct NeuralEngineComputeDevice {
// MLNeuralEngineComputeDevice must be wrapped in a C++ class for PyBind.
inline NeuralEngineComputeDevice(id impl):
m_impl(impl) {}

#if ML_COMPUTE_DEVICE_IS_AVAILABLE
API_AVAILABLE(macos(14.0))
/// Returns the wrapped object, cast back to its concrete CoreML type.
inline MLNeuralEngineComputeDevice *getImpl() const {
return (MLNeuralEngineComputeDevice *)m_impl;
}
#endif

/// Reports the Neural Engine core count; defined out-of-line so the
/// declaration stays available regardless of SDK availability macros.
int getTotalCoreCount() const;

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

/// Pybind-friendly C++ wrapper around MLModelStructureProgramOperation
/// (part of the MLModelStructure API, macOS 14.4+).
///
/// NOTE(review): unlike the compute-device wrappers above, `m_impl` is held
/// `__weak` — presumably the operation is owned by its enclosing model
/// structure and this wrapper must not extend its lifetime; confirm against
/// the corresponding .mm implementation.
struct ModelStructureProgramOperation {
// MLModelStructureProgramOperation must be wrapped in a C++ class for PyBind.
inline ModelStructureProgramOperation(id impl):
m_impl(impl) {}

#if ML_MODEL_STRUCTURE_IS_AVAILABLE
API_AVAILABLE(macos(14.4))
/// Returns the wrapped object, cast back to its concrete CoreML type.
/// May be nil if the weakly-held operation has been deallocated.
inline MLModelStructureProgramOperation *getImpl() const {
return (MLModelStructureProgramOperation *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
__weak id m_impl = nil;
};

/// Pybind-friendly C++ wrapper around MLModelStructureNeuralNetworkLayer
/// (part of the MLModelStructure API, macOS 14.4+).
///
/// NOTE(review): `m_impl` is `__weak` — presumably the layer is owned by its
/// enclosing model structure and this wrapper must not extend its lifetime;
/// confirm against the corresponding .mm implementation.
struct ModelStructureNeuralNetworkLayer {
// ModelStructureNeuralNetworkLayer must be wrapped in a C++ class for PyBind.
inline ModelStructureNeuralNetworkLayer(id impl):
m_impl(impl) {}

#if ML_MODEL_STRUCTURE_IS_AVAILABLE
API_AVAILABLE(macos(14.4))
/// Returns the wrapped object, cast back to its concrete CoreML type.
/// May be nil if the weakly-held layer has been deallocated.
inline MLModelStructureNeuralNetworkLayer *getImpl() const {
return (MLModelStructureNeuralNetworkLayer *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
__weak id m_impl = nil;
};

/// Pybind-friendly C++ wrapper around MLComputePlan (macOS 14.4+).
///
/// Besides the type-erased Objective-C object, this also retains the Python
/// model-structure object (`m_modelStructure`) so the structure outlives any
/// Python references handed out through the query methods below.
struct ComputePlan {
// MLComputePlan must be wrapped in a C++ class for PyBind.
inline ComputePlan(id impl, py::object modelStructure):
m_impl(impl),
m_modelStructure(modelStructure) {}

/// Returns the Python-side model structure captured at construction.
inline py::object getModelStructure() const {
return m_modelStructure;
}

#if ML_COMPUTE_PLAN_IS_AVAILABLE
API_AVAILABLE(macos(14.4))
/// Returns the wrapped object, cast back to its concrete CoreML type.
inline MLComputePlan *getImpl() const {
return (MLComputePlan *)m_impl;
}

// Per-operation / per-layer queries; defined out-of-line in the .mm file.
py::object getComputeDeviceUsageForMLProgramOperation(py::object operation);
py::object getComputeDeviceUsageForNeuralNetworkLayer(py::object layer);
py::object getEstimatedCostForMLProgramOperation(py::object operation);
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
// Keeps the Python model-structure object alive alongside the plan.
py::object m_modelStructure;
};

/// Pybind-friendly C++ wrapper around MLModelAsset (macOS 13.0+).
///
/// Also stores the backing Python byte buffers (`m_datas`) so the memory an
/// in-memory asset was created from stays alive for the asset's lifetime.
/// NOTE(review): unlike the other wrappers, members here are public and there
/// is no availability guard around the declaration — confirm this is
/// intentional rather than an omission.
struct ModelAsset {
// MLModelAsset must be wrapped in a C++ class for PyBind.
inline ModelAsset(id impl, std::vector<py::bytes> datas):
m_impl(impl),
m_datas(std::move(datas)) {}

API_AVAILABLE(macos(13.0))
/// Returns the wrapped object, cast back to its concrete CoreML type.
inline MLModelAsset *getImpl() const {
return (MLModelAsset *)m_impl;
}

// Type-erased MLModelAsset.
id m_impl = nil;
// Owning copies of the Python byte buffers backing the asset.
std::vector<py::bytes> m_datas;
};

class Model {
private:
MLModel *m_model = nil;
NSURL *compiledUrl = nil;
bool m_deleteCompiledModelOnExit;
bool m_deleteCompiledModelOnExit = false;

public:
static py::bytes autoSetSpecificationVersion(const py::bytes& modelBytes);
Expand All @@ -57,7 +215,18 @@ namespace CoreML {
Model(const Model&) = delete;
Model& operator=(const Model&) = delete;
~Model();
explicit Model(const std::string& urlStr, const std::string& computeUnits, const std::string& functionName, const py::dict& optimizationHints);
explicit Model(const std::string& urlStr,
const std::string& computeUnits,
const std::string& functionName,
const py::dict& optimizationHints,
const py::object& asset);

explicit Model(const std::string& urlStr,
const std::string& computeUnits,
const std::string& functionName,
const py::dict& optimizationHints);


explicit Model(MLModel* m_model, NSURL* compiledUrl, bool deleteCompiledModelOnExit);

py::list batchPredict(const py::list& batch) const;
Expand All @@ -71,6 +240,12 @@ namespace CoreML {
State newState() const;
#endif

static py::object createModelAssetFromPath(const std::string& path);
static py::object createModelAssetFromMemory(const py::bytes& specData, const py::dict& blobMapping);
static py::object getModelStructure(const std::string& modelPath);
static py::list getAvailableComputeDevices();
static py::list getAllComputeDevices();
static py::object getComputePlan(const std::string& modelPath, const std::string& computeUnits);
};
}
}
Loading