You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
<?xml version="1.0" encoding="UTF-8" ?>
<node name="/Org/Tizen/MachineLearning/Service">
  <interface name="org.tizen.machinelearning.service.pipeline">
    <!-- Register the pipeline with given description. Return the call result and its id. -->
    <method name="register_pipeline">
      <arg type="s" name="pipeline" direction="in" />
      <arg type="i" name="result" direction="out" />
      <arg type="x" name="id" direction="out" />
    </method>
    <!-- Start the pipeline with given id. -->
    <method name="start_pipeline">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Stop the pipeline with given id. -->
    <method name="stop_pipeline">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Destroy the pipeline with given id. -->
    <method name="destroy_pipeline">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Get the state of pipeline with given id. -->
    <method name="get_state">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
      <arg type="i" name="state" direction="out" />
    </method>
    <!-- Get the description of pipeline with given id. -->
    <method name="get_description">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
      <arg type="s" name="description" direction="out" />
    </method>
    <!-- Sets the pipeline description with a given name. -->
    <method name="Set">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="description" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Gets the pipeline description with a given name. -->
    <method name="Get">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="description" direction="out" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Deletes the pipeline description with a given name. -->
    <method name="Delete">
      <arg type="s" name="name" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
  </interface>
</node>
Model Interface
<?xml version="1.0" encoding="UTF-8" ?>
<node name="/Org/Tizen/MachineLearning/Service">
  <interface name="org.tizen.machinelearning.service.model">
    <!-- Set the file path of the designated neural network model. -->
    <method name="SetPath">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="path" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Get the file path of the designated neural network model. -->
    <method name="GetPath">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="path" direction="out" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Delete the file path of the designated neural network model. -->
    <method name="Delete">
      <arg type="s" name="name" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
  </interface>
</node>
Service API
Server Side
/* M1 Release */intml_service_set_pipeline (constchar *name, constchar *pipeline_desc);
intml_service_get_pipeline (constchar *name, char **pipeline_desc);
intml_service_delete_pipeline (constchar *name);
/* WIP */intml_service_pipeline_construct (constchar *name, ml_pipeline_state_cb cb, void *user_data, ml_pipeline_h *pipe);
intml_service_model_add (constchar *name, const ml_service_model_description * desc);
intml_service_server_getstate (ml_service_server_h h, ml_pipeline_state_e *state);
intml_service_server_getdesc (ml_service_server_h h, char ** desc);
intml_service_server_start (ml_service_server_h h);
intml_service_server_stop (ml_service_server_h h);
intml_service_server_close (ml_service_server_h h);
/** * @brief TBU / Query Server AI Service * @detail * Rule 1. The pipeline should not have appsink, tensor_sink, appsrc or any other app-thread dependencies. * Rule 2. Add "#INPUT#" and "#OUTPUT#" elements where input/output streams exist. * E.g., " #INPUT# ! ... ! tensor-filter ... ! ... ! #OUTPUT# ". * Rule 3. There should be exactly one pair of #INPUT# and #OUTPUT#. * Rule 4. Supply input/output metadata with input_info & output_info. * This is the simplist method, but restricted to static tensor streams.*/intml_service_server_open_queryserver_static_tensors (ml_service_server_h *h, constchar *topic_name, constchar * desc, const ml_tensors_info_h input_info, const ml_tensors_info_h output_info);
/** * @brief TBU / Query Server AI Service * @detail * Rule 1. The pipeline should not have appsink, tensor_sink, appsrc or any other app-thread dependencies. * Rule 2. You may add "#INPUT#" and "#OUTPUT#" elements if you do not know how to use tensor-query-server. * E.g., " #INPUT# ! tensor-filter ... ! ... ! #OUTPUT# ". * Rule 3. There should be exactly one pair of #INPUT# and #OUTPUT#. * Rule 4. Supply input/output metadata with gstcap_in and gstcap_out. * This supports general GStreamer streams and general Tensor streams.*/intml_service_server_open_queryserver_gstcaps (ml_service_server_h *h, constchar *topic_name, constchar * desc, constchar *gstcap_in, constchar *gstcap_out);
/** * @brief TBU / Query Server AI Service * @detail * Rule 1. The pipeline should have a single pair of tensor-query-server-{sink / src}. * Rule 2. The pipeline should not have appsink, tensor_sink, appsrc or any other app-thread dependencies. * Rule 3. There should be exactly one pair of #INPUT# and #OUTPUT# if you use them. * Rule 4. Add capsfilter or capssetter after src and before sink. * This is for seasoned gstreamer/nnstreamer users who have some experiences in pipeline writing.*/intml_service_server_open_queryserver_fulldesc (ml_service_server_h *h, constchar *topic_name, constchar * desc);
/** * @brief TBU / PUB/SUB AI Service * @detail * use "#OUTPUT#" unless you use fulldesc * don't rely on app threads (no appsink, appsrc, tensorsink or so on)*/intml_service_server_open_publisher_static_tensors (ml_service_server_h *h, constchar *topic_name, constchar * desc, const ml_tensors_data_h out);
intml_service_server_open_publisher_gstcaps (ml_service_server_h *h, constchar *topic_name, constchar * desc, constchar *gstcap_out);
intml_service_server_open_publisher_fulldesc (ml_service_server_h *h, constchar *topic_name, constchar * desc);
/** * @brief TBU / Client-side helpers * @detail * Please use a pipeline for more efficient usage. This API is for testing or apps that can afford high-latency * @param [out] in Input tensors info. Set null if you don't need this info. * @param [out] out Output tensors info. Set null if you don't need this info. * Note that we do not know if in/out is possible for remote clients, yet.*/intml_service_client_open_query (ml_service_client_h *h, constchar *topic_name, ml_tensors_info_h *in, ml_tensors_info_h *out);
intml_service_client_open_subscriber (ml_service_client_h *h, constchar *topic_name, ml_pipeline_sink_cb func, void *user_data);
intml_service_client_query (ml_service_client_h h, const ml_tensors_data_h in, ml_tensors_data_h out);
intml_service_client_close (ml_service_client_h h);
DBus Interface
Pipeline Interface
Model Interface
Service API
Server Side
Use case #1
The text was updated successfully, but these errors were encountered: