8.1 Release
cymbalrush committed Nov 18, 2024
1 parent 68b4f5a commit 6024d59
Showing 2,771 changed files with 657,027 additions and 432,947 deletions.
4 changes: 2 additions & 2 deletions BUILDING.md
@@ -19,7 +19,7 @@ Follow these steps:
1. Fork and clone the GitHub [coremltools repository](https://github.com/apple/coremltools).

2. Run the [build.sh](scripts/build.sh) script to build `coremltools`.
* By default this script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`) as an argument to change the Python version.
* By default this script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`) as an argument to change the Python version.
* The script creates a new `build` folder with the coremltools distribution, and a `dist` folder with Python wheel files.

3. Run the [test.sh](scripts/test.sh) script to test the build.
@@ -45,7 +45,7 @@ The following build targets help you configure the development environment. If y
* `test_slow` | Run all non-fast tests.
* `wheel` | Build wheels in release mode.

The script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`) as an argument to change the Python version.
The script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`) as an argument to change the Python version.
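
Once the wheel produced under `dist/` is installed into the chosen interpreter (for example with `pip install` on the generated `.whl`), a quick check confirms the build matches the Python version you selected. A minimal sketch; the exact wheel filename will vary:

```python
# Verify the freshly built wheel imports under the intended interpreter.
import sys

import coremltools as ct

print(sys.version)       # should match the --python version passed to build.sh
print(ct.__version__)    # should report the newly built coremltools version
```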

## Resources

187 changes: 181 additions & 6 deletions coremlpython/CoreMLPython.h
@@ -14,7 +14,9 @@
#pragma clang diagnostic pop

#import <CoreML/CoreML.h>
#import <Availability.h>

#import <vector>

#ifndef BUILT_WITH_MACOS15_SDK
#define BUILT_WITH_MACOS15_SDK \
@@ -28,25 +30,181 @@
#pragma message ("Building without macOS 15 SDK")
#endif

#if !defined(ML_COMPUTE_PLAN_IS_AVAILABLE) && __has_include(<CoreML/MLComputePlan.h>)
#define ML_COMPUTE_PLAN_IS_AVAILABLE 1
#endif

#if !defined(ML_MODEL_STRUCTURE_IS_AVAILABLE) && __has_include(<CoreML/MLModelStructure.h>)
#define ML_MODEL_STRUCTURE_IS_AVAILABLE 1
#endif

#if !defined(ML_COMPUTE_DEVICE_IS_AVAILABLE) && __has_include(<CoreML/MLComputeDeviceProtocol.h>)
#define ML_COMPUTE_DEVICE_IS_AVAILABLE 1
#endif

#if !defined(ML_MODEL_ASSET_IS_AVAILABLE) && __has_include(<CoreML/MLModelAsset.h>)
#define ML_MODEL_ASSET_IS_AVAILABLE 1
#endif

#if !defined(ML_STATE_IS_AVAILABLE) && __has_include(<CoreML/MLState.h>)
#define ML_STATE_IS_AVAILABLE 1
#endif


namespace py = pybind11;

namespace CoreML {
namespace Python {


struct State {
#if BUILT_WITH_MACOS15_SDK
// MLState must be wrapped in a C++ class for PyBind.
MLState* m_state = nil;
inline State(id impl):
m_impl(impl) {}

#if ML_STATE_IS_AVAILABLE
API_AVAILABLE(macos(15.0))
inline MLState *getImpl() const {
return (MLState *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

struct CPUComputeDevice {
// MLCPUComputeDevice must be wrapped in a C++ class for PyBind.
inline CPUComputeDevice(id impl):
m_impl(impl) {}

#if ML_COMPUTE_DEVICE_IS_AVAILABLE
API_AVAILABLE(macos(14.0))
inline MLCPUComputeDevice *getImpl() const {
return (MLCPUComputeDevice *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

struct GPUComputeDevice {
// MLGPUComputeDevice must be wrapped in a C++ class for PyBind.
inline GPUComputeDevice(id impl):
m_impl(impl) {}

#if ML_COMPUTE_DEVICE_IS_AVAILABLE
API_AVAILABLE(macos(14.0))
inline MLGPUComputeDevice *getImpl() const {
return (MLGPUComputeDevice *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

struct NeuralEngineComputeDevice {
// MLNeuralEngineComputeDevice must be wrapped in a C++ class for PyBind.
inline NeuralEngineComputeDevice(id impl):
m_impl(impl) {}

#if ML_COMPUTE_DEVICE_IS_AVAILABLE
API_AVAILABLE(macos(14.0))
inline MLNeuralEngineComputeDevice *getImpl() const {
return (MLNeuralEngineComputeDevice *)m_impl;
}
#endif

int getTotalCoreCount() const;

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
id m_impl = nil;
};

struct ModelStructureProgramOperation {
// MLModelStructureProgramOperation must be wrapped in a C++ class for PyBind.
inline ModelStructureProgramOperation(id impl):
m_impl(impl) {}

#if ML_MODEL_STRUCTURE_IS_AVAILABLE
API_AVAILABLE(macos(14.4))
inline MLModelStructureProgramOperation *getImpl() const {
return (MLModelStructureProgramOperation *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
__weak id m_impl = nil;
};

struct ModelStructureNeuralNetworkLayer {
// ModelStructureNeuralNetworkLayer must be wrapped in a C++ class for PyBind.
inline ModelStructureNeuralNetworkLayer(id impl):
m_impl(impl) {}

#if ML_MODEL_STRUCTURE_IS_AVAILABLE
API_AVAILABLE(macos(14.4))
inline MLModelStructureNeuralNetworkLayer *getImpl() const {
return (MLModelStructureNeuralNetworkLayer *)m_impl;
}
#endif

private:
// Type erase `m_impl` otherwise it will result in a compiler warning.
__weak id m_impl = nil;
};

struct ComputePlan {
// MLComputePlan must be wrapped in a C++ class for PyBind.
inline ComputePlan(id impl, py::object modelStructure):
m_impl(impl),
m_modelStructure(modelStructure) {}

inline py::object getModelStructure() const {
return m_modelStructure;
}

#if ML_COMPUTE_PLAN_IS_AVAILABLE
API_AVAILABLE(macos(14.4))
inline MLComputePlan *getImpl() const {
return (MLComputePlan *)m_impl;
}

py::object getComputeDeviceUsageForMLProgramOperation(py::object operation);
py::object getComputeDeviceUsageForNeuralNetworkLayer(py::object layer);
py::object getEstimatedCostForMLProgramOperation(py::object operation);
#endif

private:
id m_impl = nil;
py::object m_modelStructure;
};

struct ModelAsset {
// MLModelAsset must be wrapped in a C++ class for PyBind.
inline ModelAsset(id impl, std::vector<py::bytes> datas):
m_impl(impl),
m_datas(std::move(datas)) {}

API_AVAILABLE(macos(13.0))
inline MLModelAsset *getImpl() const {
return (MLModelAsset *)m_impl;
}

id m_impl = nil;
std::vector<py::bytes> m_datas;
};

class Model {
private:
MLModel *m_model = nil;
NSURL *compiledUrl = nil;
bool m_deleteCompiledModelOnExit;
bool m_deleteCompiledModelOnExit = false;

public:
static py::bytes autoSetSpecificationVersion(const py::bytes& modelBytes);
@@ -57,7 +215,18 @@ namespace CoreML {
Model(const Model&) = delete;
Model& operator=(const Model&) = delete;
~Model();
explicit Model(const std::string& urlStr, const std::string& computeUnits, const std::string& functionName, const py::dict& optimizationHints);
explicit Model(const std::string& urlStr,
const std::string& computeUnits,
const std::string& functionName,
const py::dict& optimizationHints,
const py::object& asset);

explicit Model(const std::string& urlStr,
const std::string& computeUnits,
const std::string& functionName,
const py::dict& optimizationHints);


explicit Model(MLModel* m_model, NSURL* compiledUrl, bool deleteCompiledModelOnExit);

py::list batchPredict(const py::list& batch) const;
@@ -71,6 +240,12 @@ namespace CoreML {
State newState() const;
#endif

static py::object createModelAssetFromPath(const std::string& path);
static py::object createModelAssetFromMemory(const py::bytes& specData, const py::dict& blobMapping);
static py::object getModelStructure(const std::string& modelPath);
static py::list getAvailableComputeDevices();
static py::list getAllComputeDevices();
static py::object getComputePlan(const std::string& modelPath, const std::string& computeUnits);
};
}
}
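
The declarations above are what pybind11 binds into the `coremltools` Python package; users reach them through the public API rather than through these C++ types directly, and the ComputePlan and compute-device wrappers back the compute-plan inspection methods exposed alongside them. A minimal usage sketch of the main entry point; the model path, feature name, and input values below are placeholders for illustration:

```python
# Load a Core ML model through the pybind-backed Model wrapper and run a
# prediction. "Model.mlpackage" and the feature name "input" are placeholders;
# substitute the names from your own model.
import coremltools as ct

model = ct.models.MLModel(
    "Model.mlpackage",
    # The enum corresponds to the computeUnits string taken by the C++ Model
    # constructor shown above.
    compute_units=ct.ComputeUnit.CPU_AND_NE,
)
print(model.predict({"input": [1.0, 2.0, 3.0]}))
```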