From 852a3314483bc85934cbf147303684bab01c32ca Mon Sep 17 00:00:00 2001 From: Kaihui-intel Date: Wed, 5 Jul 2023 12:50:36 +0800 Subject: [PATCH] replace rm with remove & update examples Signed-off-by: Kaihui-intel --- .../tf_example1/README.md | 20 ++++++++++++++-- neural_solution/examples/hf_models/README.md | 23 ++++++++++++++++--- neural_solution/launcher.py | 6 ++--- 3 files changed, 41 insertions(+), 8 deletions(-) diff --git a/neural_solution/examples/custom_models_optimized/tf_example1/README.md b/neural_solution/examples/custom_models_optimized/tf_example1/README.md index a683316177e..31a6af782c3 100644 --- a/neural_solution/examples/custom_models_optimized/tf_example1/README.md +++ b/neural_solution/examples/custom_models_optimized/tf_example1/README.md @@ -8,6 +8,7 @@ In this example, we show how to quantize a [custom model](https://github.com/int - Demonstrate how to start the Neural Solution Service. - Demonstrate how to prepare an optimization task request and submit it to Neural Solution Service. - Demonstrate how to query the status of the task and fetch the optimization result. +- Demonstrate how to query and manage the resource of the cluster. ### Requirements Customizing the model requires preparing the following folders and files. 
@@ -48,12 +49,12 @@ neural_solution -h usage: neural_solution {start,stop} [-h] [--hostfile HOSTFILE] [--restful_api_port RESTFUL_API_PORT] [--grpc_api_port GRPC_API_PORT] [--result_monitor_port RESULT_MONITOR_PORT] [--task_monitor_port TASK_MONITOR_PORT] [--api_type API_TYPE] - [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] + [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] [--query] [--join JOIN] [--remove REMOVE] Neural Solution positional arguments: - {start,stop} start/stop service + {start,stop,cluster} start/stop/management service optional arguments: -h, --help show this help message and exit @@ -73,6 +74,9 @@ optional arguments: specify the running environment for the task --upload_path UPLOAD_PATH specify the file path for the tasks + --query [cluster parameter] query cluster information + --join JOIN [cluster parameter] add new node into cluster + --remove REMOVE [cluster parameter] remove from cluster ``` @@ -145,6 +149,18 @@ When using distributed quantization, the `workers` needs to be set to greater th } } +``` +### Manage resource +```shell +# query cluster information +neural_solution cluster --query + +# add new node into cluster +# parameter: "<node1> <number_of_sockets> <memory_size>; <node2> <number_of_sockets> <memory_size>" +neural_solution cluster --join "host1 2 20; host2 5 20" + +# remove node from cluster according to id +neural_solution cluster --remove <node_id> ``` ### Stop the service ```shell diff --git a/neural_solution/examples/hf_models/README.md b/neural_solution/examples/hf_models/README.md index 75527205380..6012b13c241 100644 --- a/neural_solution/examples/hf_models/README.md +++ b/neural_solution/examples/hf_models/README.md @@ -6,6 +6,7 @@ In this example, we show how to quantize a Hugging Face model with Neural Soluti - Demonstrate how to start the Neural Solution Service. - Demonstrate how to prepare an optimization task request and submit it to Neural Solution Service. - Demonstrate how to query the status of the task and fetch the optimization result. 
+- Demonstrate how to query and manage the resource of the cluster. ### Start the Neural Solution Service @@ -27,14 +28,14 @@ neural_solution stop neural_solution -h # Help output -usage: neural_solution {start,stop} [-h] [--hostfile HOSTFILE] [--restful_api_port RESTFUL_API_PORT] [--grpc_api_port GRPC_API_PORT] +usage: neural_solution {start,stop,cluster} [-h] [--hostfile HOSTFILE] [--restful_api_port RESTFUL_API_PORT] [--grpc_api_port GRPC_API_PORT] [--result_monitor_port RESULT_MONITOR_PORT] [--task_monitor_port TASK_MONITOR_PORT] [--api_type API_TYPE] - [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] + [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] [--query] [--join JOIN] [--remove REMOVE] Neural Solution positional arguments: - {start,stop} start/stop service + {start,stop,cluster} start/stop/management service optional arguments: -h, --help show this help message and exit @@ -54,6 +55,9 @@ optional arguments: specify the running environment for the task --upload_path UPLOAD_PATH specify the file path for the tasks + --query [cluster parameter] query cluster information + --join JOIN [cluster parameter] add new node into cluster + --remove REMOVE [cluster parameter] remove from cluster ``` @@ -110,6 +114,19 @@ optional arguments: "result_path": "/path/to/projects/neural solution service/workspace/fafdcd3b22004a36bc60e92ec1d646d0/q_model_path" } +``` +### Manage resource +```shell +# query cluster information +neural_solution cluster --query + +# add new node into cluster +# parameter: "<node1> <number_of_sockets> <memory_size>; <node2> <number_of_sockets> <memory_size>" +neural_solution cluster --join "host1 2 20; host2 5 20" + +# remove node from cluster according to id +neural_solution cluster --remove <node_id> + ``` ### Stop the service ```shell diff --git a/neural_solution/launcher.py b/neural_solution/launcher.py index 21922cf7468..218c631b97d 100644 --- a/neural_solution/launcher.py +++ b/neural_solution/launcher.py @@ -377,8 +377,8 @@ def manage_cluster(args): query_cluster(db_path) if 
args.join: join_node_to_cluster(db_path, args) - if args.rm: - remove_node_from_cluster(db_path, node_id=args.rm) + if args.remove: + remove_node_from_cluster(db_path, node_id=args.remove) def main(): @@ -427,7 +427,7 @@ def main(): "--join", help="[cluster parameter] add new node into cluster" ) parser.add_argument( - "--rm", help="[cluster parameter] remove from cluster" + "--remove", help="[cluster parameter] remove from cluster" ) args = parser.parse_args()