DBus Interface #179

Open
again4you opened this issue Jun 17, 2022 · 1 comment

again4you commented Jun 17, 2022
DBus Interface

Pipeline Interface

<?xml version="1.0" encoding="UTF-8" ?>
<node name="/Org/Tizen/MachineLearning/Service">
  <interface name="org.tizen.machinelearning.service.pipeline">
    <!-- Register a pipeline with the given description. Returns the call result and the pipeline id. -->
    <method name="register_pipeline">
      <arg type="s" name="pipeline" direction="in" />
      <arg type="i" name="result" direction="out" />
      <arg type="x" name="id" direction="out" />
    </method>
    <!-- Start the pipeline with the given id. -->
    <method name="start_pipeline">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Stop the pipeline with the given id. -->
    <method name="stop_pipeline">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Destroy the pipeline with the given id. -->
    <method name="destroy_pipeline">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Get the state of the pipeline with the given id. -->
    <method name="get_state">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
      <arg type="i" name="state" direction="out" />
    </method>
    <!-- Get the description of the pipeline with the given id. -->
    <method name="get_description">
      <arg type="x" name="id" direction="in" />
      <arg type="i" name="result" direction="out" />
      <arg type="s" name="description" direction="out" />
    </method>

    <!-- Sets the pipeline description with a given name. -->
    <method name="Set">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="description" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Gets the pipeline description with a given name. -->
    <method name="Get">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="description" direction="out" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Deletes the pipeline description with a given name. -->
    <method name="Delete">
      <arg type="s" name="name" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
  </interface>
</node>
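
For reference, a client can invoke these methods with plain GDBus calls. Below is a minimal C sketch for register_pipeline; note that the well-known bus name is an assumption (the XML above only fixes the object path and interface name), and that D-Bus type "x" maps to a 64-bit integer while "i" maps to a 32-bit integer.

#include <gio/gio.h>

/* Minimal sketch: call register_pipeline over D-Bus.
 * The bus name "org.tizen.machinelearning.service" is assumed, not specified above. */
static int
register_pipeline (GDBusConnection *conn, const gchar *desc, gint64 *id)
{
  GError *err = NULL;
  gint result = -1;
  GVariant *ret = g_dbus_connection_call_sync (conn,
      "org.tizen.machinelearning.service",       /* assumed well-known bus name */
      "/Org/Tizen/MachineLearning/Service",      /* object path from the XML */
      "org.tizen.machinelearning.service.pipeline",
      "register_pipeline",
      g_variant_new ("(s)", desc),               /* in: pipeline description */
      G_VARIANT_TYPE ("(ix)"),                   /* out: result (i), id (x) */
      G_DBUS_CALL_FLAGS_NONE, -1, NULL, &err);

  if (!ret) {
    g_warning ("register_pipeline failed: %s", err->message);
    g_clear_error (&err);
    return -1;
  }
  g_variant_get (ret, "(ix)", &result, id);
  g_variant_unref (ret);
  return result;
}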

Model Interface

<?xml version="1.0" encoding="UTF-8" ?>
<node name="/Org/Tizen/MachineLearning/Service">
  <interface name="org.tizen.machinelearning.service.model">
    <!-- Set the file path of the designated neural network model. -->
    <method name="SetPath">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="path" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Get the file path of the designated neural network model. -->
    <method name="GetPath">
      <arg type="s" name="name" direction="in" />
      <arg type="s" name="path" direction="out" />
      <arg type="i" name="result" direction="out" />
    </method>
    <!-- Delete the file path of the designated neural network model. -->
    <method name="Delete">
      <arg type="s" name="name" direction="in" />
      <arg type="i" name="result" direction="out" />
    </method>
  </interface>
</node>
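
The model interface follows the same calling pattern. A hedged GetPath sketch, reusing the assumed bus name from the pipeline example above; note the reply tuple is (path, result) because path is declared before result:

GError *err = NULL;
GVariant *ret = g_dbus_connection_call_sync (conn,
    "org.tizen.machinelearning.service",         /* assumed well-known bus name */
    "/Org/Tizen/MachineLearning/Service",
    "org.tizen.machinelearning.service.model",
    "GetPath",
    g_variant_new ("(s)", "my_model"),           /* "my_model" is a hypothetical name */
    G_VARIANT_TYPE ("(si)"),                     /* out: path (s), result (i) */
    G_DBUS_CALL_FLAGS_NONE, -1, NULL, &err);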

Service API

Server Side

/* M1 Release */
int ml_service_set_pipeline (const char *name, const char *pipeline_desc);
int ml_service_get_pipeline (const char *name, char **pipeline_desc);
int ml_service_delete_pipeline (const char *name);

/* WIP */
int ml_service_pipeline_construct (const char *name, ml_pipeline_state_cb cb, void *user_data, ml_pipeline_h *pipe);
int ml_service_model_add (const char *name, const ml_service_model_description * desc);

int ml_service_server_getstate (ml_service_server_h h, ml_pipeline_state_e *state);
int ml_service_server_getdesc (ml_service_server_h h, char ** desc);
int ml_service_server_start (ml_service_server_h h);
int ml_service_server_stop (ml_service_server_h h);
int ml_service_server_close (ml_service_server_h h);

/**
 * @brief TBU / Query Server AI Service
 * @details
 *   Rule 1. The pipeline should not have appsink, tensor_sink, appsrc, or any other app-thread dependencies.
 *   Rule 2. Add "#INPUT#" and "#OUTPUT#" elements where the input/output streams exist.
 *     E.g., " #INPUT# ! ... ! tensor-filter ... ! ... ! #OUTPUT# ".
 *   Rule 3. There should be exactly one pair of #INPUT# and #OUTPUT#.
 *   Rule 4. Supply the input/output metadata with input_info and output_info.
 *   This is the simplest method, but it is restricted to static tensor streams.
 */
int ml_service_server_open_queryserver_static_tensors (ml_service_server_h *h, const char *topic_name, const char * desc, const ml_tensors_info_h input_info, const ml_tensors_info_h output_info);
/**
 * @brief TBU / Query Server AI Service
 * @details
 *   Rule 1. The pipeline should not have appsink, tensor_sink, appsrc, or any other app-thread dependencies.
 *   Rule 2. You may add "#INPUT#" and "#OUTPUT#" elements if you do not know how to use tensor-query-server.
 *     E.g., " #INPUT# ! tensor-filter ... ! ... ! #OUTPUT# ".
 *   Rule 3. There should be exactly one pair of #INPUT# and #OUTPUT#.
 *   Rule 4. Supply the input/output metadata with gstcap_in and gstcap_out.
 *   This supports general GStreamer streams and general tensor streams.
 */
int ml_service_server_open_queryserver_gstcaps (ml_service_server_h *h, const char *topic_name, const char * desc, const char *gstcap_in, const char *gstcap_out);
/**
 * @brief TBU / Query Server AI Service
 * @details
 *   Rule 1. The pipeline should have a single pair of tensor-query-server-{sink / src}.
 *   Rule 2. The pipeline should not have appsink, tensor_sink, appsrc, or any other app-thread dependencies.
 *   Rule 3. There should be exactly one pair of #INPUT# and #OUTPUT# if you use them.
 *   Rule 4. Add a capsfilter or capssetter after the src and before the sink.
 *   This is for seasoned GStreamer/NNStreamer users who have experience in pipeline writing.
 */
int ml_service_server_open_queryserver_fulldesc (ml_service_server_h *h, const char *topic_name, const char * desc);
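
Since these signatures are still WIP, the following is only a usage sketch of the static-tensors variant; the topic name, model file, and error-handling behavior are assumptions for illustration, not part of this proposal.

/* Hedged usage sketch for the WIP query-server API; names are hypothetical. */
ml_service_server_h server;
ml_tensors_info_h in_info, out_info;
int status;

ml_tensors_info_create (&in_info);
ml_tensors_info_set_count (in_info, 1);
ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
/* ... set dimensions, and fill out_info the same way ... */

status = ml_service_server_open_queryserver_static_tensors (&server,
    "object_detection",        /* hypothetical topic name */
    "#INPUT# ! tensor_filter framework=tensorflow-lite model=det.tflite ! #OUTPUT#",
    in_info, out_info);
if (status == ML_ERROR_NONE)
  status = ml_service_server_start (server);
/* ... later ... */
ml_service_server_stop (server);
ml_service_server_close (server);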


/**
 * @brief TBU / PUB/SUB AI Service
 * @details
 *   Use "#OUTPUT#" unless you use fulldesc.
 *   Do not rely on app threads (no appsink, appsrc, tensor_sink, and so on).
 */
int ml_service_server_open_publisher_static_tensors (ml_service_server_h *h, const char *topic_name, const char * desc, const ml_tensors_data_h out);
int ml_service_server_open_publisher_gstcaps (ml_service_server_h *h, const char *topic_name, const char * desc, const char *gstcap_out);
int ml_service_server_open_publisher_fulldesc (ml_service_server_h *h, const char *topic_name, const char * desc);
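
A corresponding publisher sketch, with the same caveat that the API is WIP and the topic and caps string are made up for illustration:

/* Publish a raw video stream under a topic; names and caps are hypothetical. */
ml_service_server_h pub;
int status = ml_service_server_open_publisher_gstcaps (&pub,
    "camera_feed",             /* hypothetical topic name */
    "v4l2src ! videoconvert ! #OUTPUT#",
    "video/x-raw,format=RGB,width=640,height=480");
if (status == ML_ERROR_NONE)
  ml_service_server_start (pub);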


/**
 * @brief TBU / Client-side helpers
 * @details
 *   Please use a pipeline for more efficient usage. This API is for testing or for apps that can afford high latency.
 * @param [out] in Input tensors info. Set to null if you do not need this info.
 * @param [out] out Output tensors info. Set to null if you do not need this info.
 *   Note that it is not yet known whether in/out can be provided for remote clients.
 */
int ml_service_client_open_query (ml_service_client_h *h, const char *topic_name, ml_tensors_info_h *in, ml_tensors_info_h *out);
int ml_service_client_open_subscriber (ml_service_client_h *h, const char *topic_name, ml_pipeline_sink_cb func, void *user_data);
int ml_service_client_query (ml_service_client_h h, const ml_tensors_data_h in, ml_tensors_data_h out);
int ml_service_client_close (ml_service_client_h h);
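
And a client-side sketch under the same WIP caveat; the topic name is hypothetical, and the query path assumes the in/out info is returned locally:

/* Hedged client-side sketch; only the signatures come from this proposal. */
ml_service_client_h client;
ml_tensors_info_h in_info = NULL, out_info = NULL;
ml_tensors_data_h in_data, out_data;

ml_service_client_open_query (&client, "object_detection", &in_info, &out_info);
ml_tensors_data_create (in_info, &in_data);
ml_tensors_data_create (out_info, &out_data);
/* ... fill in_data with an input frame ... */
ml_service_client_query (client, in_data, out_data);
/* ... consume out_data ... */
ml_service_client_close (client);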

Use case #1

const gchar my_pipeline[] = "videotestsrc is-live=true ! videoconvert ! tensor_converter ! tensor_sink async=false";
gchar *pipeline;
int status;
ml_pipeline_h handle;

/* Register the pipeline description under a name, then retrieve it by that name. */
status = ml_service_set_pipeline ("my_pipeline", my_pipeline);
status = ml_service_get_pipeline ("my_pipeline", &pipeline);

/* Construct a runnable pipeline from the retrieved description. */
status = ml_pipeline_construct (pipeline, NULL, NULL, &handle);
g_free (pipeline);
...
taos-ci commented Jun 17, 2022

cibot: Thank you for posting issue #179. The person in charge will reply soon.

@anyj0527 anyj0527 self-assigned this Jun 29, 2022
@again4you again4you self-assigned this Jul 5, 2022