diff --git a/INSTALL.md b/INSTALL.md
index f80c7b0d..e383cb55 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -7,7 +7,8 @@ To install VDMS, we must install the necessary dependencies via apt, github, and
 ### Install Debian/Ubuntu Packages
 Here we will install the Debian/Ubuntu packages.
 ```bash
-sudo apt-get update
+sudo apt-get update -y --fix-missing
+sudo apt-get upgrade -y
 sudo apt-get install -y --no-install-suggests --no-install-recommends \
     apt-transport-https autoconf automake bison build-essential bzip2 ca-certificates \
     curl ed flex g++-9 gcc-9 git gnupg-agent javacc libarchive-tools libatlas-base-dev \
@@ -55,7 +56,17 @@ alias python=/usr/bin/python3
 You can also install the coverage package if interested in running the Python unit tests.
 ```bash
 python3 -m pip install --upgrade pip
-python3 -m pip install --no-cache-dir "numpy>=1.25.1" "coverage>=7.2.7"
+python3 -m pip install --no-cache-dir "numpy>=1.26.0" "coverage>=7.3.1"
+```
+
+
+#### **Valijson v0.6**
+This is a header-only library; no compilation or installation is necessary.
+```bash
+VALIJSON_VERSION="v0.6"
+git clone --branch ${VALIJSON_VERSION} https://github.com/tristanpenman/valijson.git $VDMS_DEP_DIR/valijson
+cd $VDMS_DEP_DIR/valijson
+sudo cp -r include/* /usr/local/include/
 ```
 
 
@@ -71,36 +82,11 @@ sudo make install
 ```
 
 
-#### **Faiss v1.7.3**
-Install the Faiss library for similarity search.
-```bash
-FAISS_VERSION="v1.7.3"
-git clone --branch ${FAISS_VERSION} https://github.com/facebookresearch/faiss.git $VDMS_DEP_DIR/faiss
-cd $VDMS_DEP_DIR/faiss
-mkdir build && cd build
-cmake -DFAISS_ENABLE_GPU=OFF -DPython_EXECUTABLE=/usr/bin/python3 ..
-make ${BUILD_THREADS}
-sudo make install
-```
-
-
-#### **FLINNG**
-Install the Filters to Identify Near-Neighbor Groups (FLINNG) library for similarity search.
-```bash
-git clone https://github.com/tonyzhang617/FLINNG.git $VDMS_DEP_DIR/FLINNG
-cd $VDMS_DEP_DIR/FLINNG
-mkdir build && cd build
-cmake ..
-make ${BUILD_THREADS}
-sudo make install
-```
-
-
 #### **Protobuf v24.2 (4.24.2)**
 Install Protobuf (C++ and Python) which requires GoogleTest and Abseil C++ as dependencies.
 ```bash
 PROTOBUF_VERSION="24.2"
-git clone -b v${PROTOBUF_VERSION} --recursive https://github.com/protocolbuffers/protobuf.git $VDMS_DEP_DIR/protobuf
+git clone -b v${PROTOBUF_VERSION} --recurse-submodules https://github.com/protocolbuffers/protobuf.git $VDMS_DEP_DIR/protobuf
 
 cd $VDMS_DEP_DIR/protobuf/third_party/googletest
 mkdir build && cd build
@@ -128,42 +114,31 @@ python3 -m pip install --no-cache-dir "protobuf==4.${PROTOBUF_VERSION}"
 ```
 
 
-#### **[OpenCV](https://opencv.org/) 4.5.5**
-Below are instructions for installing ***OpenCV v4.5.5***.
+#### **Faiss v1.7.3**
+Install the Faiss library for similarity search.
 ```bash
-OPENCV_VERSION="4.5.5"
-git clone --branch ${OPENCV_VERSION} https://github.com/opencv/opencv.git $VDMS_DEP_DIR/opencv
-cd $VDMS_DEP_DIR/opencv
+FAISS_VERSION="v1.7.3"
+git clone --branch ${FAISS_VERSION} https://github.com/facebookresearch/faiss.git $VDMS_DEP_DIR/faiss
+cd $VDMS_DEP_DIR/faiss
 mkdir build && cd build
-cmake -D BUILD_PERF_TESTS=OFF -D BUILD_TESTS=OFF ..
+cmake -DFAISS_ENABLE_GPU=OFF -DPython_EXECUTABLE=/usr/bin/python3 ..
 make ${BUILD_THREADS}
 sudo make install
 ```
 
-**Note**: When using videos, and getting the following error: "Unable to stop the stream: Inappropriate ioctl for device", you may need to include more flags when compiling OpenCV. Follow these instructions ([source](https://stackoverflow.com/questions/41200201/opencv-unable-to-stop-the-stream-inappropriate-ioctl-for-device)):
-```bash
-apt-get install ffmpeg
-apt-get install libavcodec-dev libavformat-dev libavdevice-dev
-cmake -D BUILD_PERF_TESTS=OFF -D BUILD_TESTS=OFF -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local \
-    -D WITH_FFMPEG=ON -D WITH_TBB=ON -D WITH_GTK=ON \
-    -D WITH_V4L=ON -D WITH_OPENGL=ON -D WITH_CUBLAS=ON \
-    -DWITH_QT=OFF -DCUDA_NVCC_FLAGS="-D_FORCE_INLINES" ..
+#### **FLINNG**
+Install the Filters to Identify Near-Neighbor Groups (FLINNG) library for similarity search.
+```bash
+git clone https://github.com/tonyzhang617/FLINNG.git $VDMS_DEP_DIR/FLINNG
+cd $VDMS_DEP_DIR/FLINNG
+mkdir build && cd build
+cmake ..
 make ${BUILD_THREADS}
 sudo make install
 ```
 
 
-#### **Valijson v0.6**
-This is a headers-only library, no compilation/installation necessary.
-```bash
-VALIJSON_VERSION="v0.6"
-git clone --branch ${VALIJSON_VERSION} https://github.com/tristanpenman/valijson.git $VDMS_DEP_DIR/valijson
-cd $VDMS_DEP_DIR/valijson
-sudo cp -r include/* /usr/local/include/
-```
-
-
 #### **[TileDB](https://tiledb.io/) 2.14.1**
 The directions below will help you install TileDB v2.14.1 from the source.
 You can also follow the directions listed [here](https://docs.tiledb.io/en/latest/installation.html).
@@ -191,21 +166,46 @@ cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_PREFIX_PATH=/usr/local/ -DCMAKE_INSTAL
 make ${BUILD_THREADS}
 sudo make install
 ```
+
+
+#### **[OpenCV](https://opencv.org/) 4.5.5**
+Below are instructions for installing ***OpenCV v4.5.5***.
+```bash
+OPENCV_VERSION="4.5.5"
+git clone --branch ${OPENCV_VERSION} https://github.com/opencv/opencv.git $VDMS_DEP_DIR/opencv
+cd $VDMS_DEP_DIR/opencv
+mkdir build && cd build
+cmake -D BUILD_PERF_TESTS=OFF -D BUILD_TESTS=OFF ..
+make ${BUILD_THREADS}
+sudo make install
+```
+
+**Note**: If you encounter the error "Unable to stop the stream: Inappropriate ioctl for device" when working with videos, you may need to include more flags when compiling OpenCV. Follow these instructions ([source](https://stackoverflow.com/questions/41200201/opencv-unable-to-stop-the-stream-inappropriate-ioctl-for-device)):
+```bash
+sudo apt-get install -y ffmpeg
+sudo apt-get install -y libavdevice-dev
+
+cmake -D BUILD_PERF_TESTS=OFF -D BUILD_TESTS=OFF -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local \
+    -D WITH_FFMPEG=ON -D WITH_TBB=ON -D WITH_GTK=ON \
+    -D WITH_V4L=ON -D WITH_OPENGL=ON -D WITH_CUBLAS=ON \
+    -DWITH_QT=OFF -DCUDA_NVCC_FLAGS="-D_FORCE_INLINES" ..
+make ${BUILD_THREADS}
+sudo make install
+```
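After rebuilding with these flags, you can optionally confirm that FFmpeg support was picked up. This is a minimal sanity check, assuming the OpenCV build above also produced the `cv2` Python bindings (skip it if you built only the C++ libraries):
```python
# Optional check; assumes the OpenCV build produced the cv2 Python bindings.
import cv2

print(cv2.__version__)  # expect 4.5.5
for line in cv2.getBuildInformation().splitlines():
    if "FFMPEG" in line:
        print(line.strip())  # should report YES when video support is enabled
```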
## Install VDMS
This version of VDMS treats PMGD as a submodule so both libraries are compiled at one time. After entering the vdms directory, the command `git submodule update --init --recursive` will pull pmgd into the appropriate directory. Furthermore, CMake is used to compile all directories.
```bash
-git clone -b develop https://github.com/IntelLabs/vdms.git
+git clone -b develop --recurse-submodules https://github.com/IntelLabs/vdms.git
 cd vdms
-git submodule update --init --recursive
```
When compiling on a target without Optane persistent memory, use the following:
```bash
 mkdir build && cd build
 cmake ..
-make -j
+make ${BUILD_THREADS}
 cp ../config-vdms.json .
```
@@ -213,6 +213,6 @@ When compiling on a target with Optane persistent memory, use the command set:
```bash
 mkdir build && cd build
 cmake -DCMAKE_CXX_FLAGS='-DPM' ..
-make -j
+make ${BUILD_THREADS}
```

diff --git a/client/python/setup.py b/client/python/setup.py
index ef627cd4..a6c72e0f 100644
--- a/client/python/setup.py
+++ b/client/python/setup.py
@@ -5,7 +5,7 @@
 setuptools.setup(
     name="vdms",
-    version="0.0.19",
+    version="0.0.20",
     author="Chaunté W. Lacewell",
     author_email="chaunte.w.lacewell@intel.com",
     description="VDMS Client Module",
diff --git a/client/python/vdms/vdms.py b/client/python/vdms/vdms.py
index 248d6731..9044858d 100644
--- a/client/python/vdms/vdms.py
+++ b/client/python/vdms/vdms.py
@@ -42,6 +42,17 @@ class vdms(object):
     def __init__(self):
         self.dataNotUsed = []
+        self.init_connection()
+        self.last_response = ""
+
+    def __del__(self):
+        self.conn.close()
+        self.connected = False
+
+    def init_connection(self):
+        if hasattr(self, "conn") and self.conn is not None:
+            self.conn.close()
+
         self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         self.conn.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
@@ -51,20 +62,29 @@ def __init__(self):
         # https://docs.python.org/dev/library/sys.html#sys.platform
         if sys.platform.startswith("linux"):
             self.conn.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
-        self.connected = False
-        self.last_response = ""
-
-    def __del__(self):
-        self.conn.close()
 
     def connect(self, host="localhost", port=55555):
-        self.conn.connect((host, port))
-        self.connected = True
+        if self.connected is False:
+            self.init_connection()
+            self.conn.connect((host, port))
+            self.connected = True
+            return True
+        else:
+            print("Connection is already active")
+            return False
 
     def disconnect(self):
-        self.conn.close()
-        self.connected = False
+        if self.connected is True:
+            self.conn.close()
+            self.connected = False
+            return True
+        else:
+            print("There is not an active connection")
+            return False
+
+    def is_connected(self):
+        return self.connected
 
     # Receives a json struct as a string
     def query(self, query, blob_array=[]):
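The reworked connection handling above guards against duplicate connects and dangling sockets. A short session illustrating the new return values (a sketch, assuming a VDMS server is listening on the default port 55555):
```python
# Exercises the connect/disconnect semantics introduced above.
import vdms

db = vdms.vdms()
db.connect("localhost", 55555)   # opens the connection, returns True
print(db.is_connected())         # True
db.connect("localhost", 55555)   # prints "Connection is already active", returns False
db.disconnect()                  # closes the connection, returns True
print(db.is_connected())         # False
```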
diff --git a/config-vdms.json b/config-vdms.json
index 0d1e5fb0..40b29d7c 100755
--- a/config-vdms.json
+++ b/config-vdms.json
@@ -6,7 +6,9 @@
     // "backup_path":"backups_test", // set this if you want different path to store the back up file
     "db_root_path": "db",
     "backup_flag" : "false",
-    "storage_type": "local", //local, aws, etc
+    "storage_type": "local", //local, aws
+    // use_endpoint: [true|false] When "storage_type" is "aws", this key specifies whether to use a "mocked" AWS connection (e.g. a local MinIO endpoint)
+    "use_endpoint": false,
     "bucket_name": "minio-bucket",
     "more-info": "github.com/IntelLabs/vdms"
 }
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index ba4a1101..242937ec 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -1,87 +1,139 @@
 #Copyright (C) 2023 Intel Corporation
 #SPDX-License-Identifier: MIT
 
-ARG BASE_VERSION=11.7-slim
+ARG BASE_VERSION=11.8-slim
 ARG BUILD_THREADS="-j16"
-
-FROM debian:${BASE_VERSION}
-
+############################################################
+# BASE IMAGE W/ ENV VARS
+FROM debian:${BASE_VERSION} as base
 # Dockerfile limitations force a repetition of global args
 ARG BUILD_THREADS
+ENV DEBIAN_FRONTEND=noninteractive
+ENV DEBCONF_NOWARNINGS="yes"
+ENV PROTOBUF_VERSION="24.2"
+ENV NUMPY_MIN_VERSION="1.26.0"
+
+############################################################
+# BUILD DEPENDENCIES
+FROM base as build
+
 # Install Packages
-RUN apt-get update -y && apt-get upgrade -y && apt-get install -y --no-install-suggests --no-install-recommends \
-    apt-transport-https autoconf automake bison build-essential bzip2 ca-certificates \
-    curl ed flex g++-9 gcc-9 git gnupg-agent javacc libarchive-tools libatlas-base-dev \
-    libavcodec-dev libavformat-dev libboost-all-dev libbz2-dev libc-ares-dev libcurl4-openssl-dev \
-    libdc1394-22-dev libgflags-dev libgoogle-glog-dev libgtk-3-dev libgtk2.0-dev \
-    libhdf5-dev libjpeg-dev libjpeg62-turbo-dev libjsoncpp-dev libleveldb-dev liblmdb-dev \
-    liblz4-dev libopenblas-dev libopenmpi-dev libpng-dev librdkafka-dev libsnappy-dev libssl-dev \
-    libswscale-dev libtbb-dev libtbb2 libtiff-dev libtiff5-dev libtool libzmq3-dev linux-libc-dev mpich \
-    openjdk-11-jdk-headless pkg-config procps python3-dev python3-pip software-properties-common \
-    swig unzip uuid-dev && \
+# hadolint ignore=DL3008
+RUN apt-get update -y && apt-get upgrade -y && \
+    apt-get install -y --no-install-suggests --no-install-recommends --fix-missing \
+    apt-transport-https autoconf automake bison build-essential bzip2 ca-certificates \
+    curl ed flex g++-9 gcc-9 git gnupg-agent javacc libarchive-tools libatlas-base-dev \
+    libavcodec-dev libavformat-dev libboost-all-dev libbz2-dev libc-ares-dev libcurl4-openssl-dev \
+    libdc1394-22-dev libgflags-dev libgoogle-glog-dev libgtk-3-dev libgtk2.0-dev \
+    libhdf5-dev libjpeg-dev libjpeg62-turbo-dev libjsoncpp-dev libleveldb-dev liblmdb-dev \
+    liblz4-dev libopenblas-dev libopenmpi-dev libpng-dev librdkafka-dev libsnappy-dev libssl-dev \
+    libswscale-dev libtbb-dev libtbb2 libtiff-dev libtiff5-dev libtool libzmq3-dev linux-libc-dev mpich \
+    openjdk-11-jdk-headless pkg-config procps python3-dev python3-pip software-properties-common \
+    swig unzip uuid-dev && \
     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 1 && \
     update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-9 1 && \
     apt-get clean && rm -rf /var/lib/apt/lists/* && \
     ln -s /usr/bin/python3 /usr/bin/python
 
 # Pull and Install Dependencies
-ENV CMAKE_VERSION="v3.27.2" \
-    PROTOBUF_VERSION="24.2" \
-    OPENCV_VERSION="4.5.5" \
-    FAISS_VERSION="v1.7.3" \
+WORKDIR /dependencies
+ENV CMAKE_VERSION="v3.27.2" \
     VALIJSON_VERSION="v0.6" \
-    AWS_SDK_VERSION="1.11.0" \
-    TILEDB_VERSION="2.14.1"
+    FAISS_VERSION="v1.7.3" \
+    OPENCV_VERSION="4.5.5" \
+    TILEDB_VERSION="2.14.1" \
+    AWS_SDK_VERSION="1.11.0"
 
-WORKDIR /dependencies
-RUN pip install --no-cache-dir "numpy>=1.25.1" "coverage>=7.2.7" && \
-    git clone --branch ${CMAKE_VERSION} https://github.com/Kitware/CMake.git && \
-    cd CMake && ./bootstrap && make ${BUILD_THREADS} && make install && cd /dependencies/ && \
-    git clone --branch ${FAISS_VERSION} https://github.com/facebookresearch/faiss.git && \
-    cd /dependencies/faiss && mkdir build && cd build && \
-    cmake -DFAISS_ENABLE_GPU=OFF
-DPython_EXECUTABLE=/usr/bin/python3 .. && \ - make ${BUILD_THREADS} && make install && cd /dependencies/ && \ - git clone https://github.com/tonyzhang617/FLINNG.git && \ - cd /dependencies/FLINNG && mkdir build && cd build && cmake .. && \ - make ${BUILD_THREADS} && make install && cd /dependencies && \ - git clone -b v${PROTOBUF_VERSION} --recursive https://github.com/protocolbuffers/protobuf.git && \ +# hadolint ignore=DL3003 +RUN python3 -m pip install --no-cache-dir "numpy>=${NUMPY_MIN_VERSION}" && \ + git clone --branch ${VALIJSON_VERSION} https://github.com/tristanpenman/valijson.git /dependencies/valijson && \ + cd /dependencies/valijson && cp -r include/* /usr/local/include/ && \ + mkdir -p /opt/dist/usr/local/include/ && cp -r include/* /opt/dist/usr/local/include/ + +# hadolint ignore=DL3003,SC2086 +RUN git clone --branch ${CMAKE_VERSION} https://github.com/Kitware/CMake.git /dependencies/CMake && \ + cd /dependencies/CMake && ./bootstrap && make ${BUILD_THREADS} && \ + make install DESTDIR=/opt/dist && make install + +# PROTOBUF & ITS DEPENDENCIES +# hadolint ignore=DL3003,SC2086 +RUN git clone -b "v${PROTOBUF_VERSION}" --recurse-submodules https://github.com/protocolbuffers/protobuf.git /dependencies/protobuf && \ cd /dependencies/protobuf/third_party/googletest && mkdir build && cd build/ && \ cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DBUILD_GMOCK=ON -DCMAKE_CXX_STANDARD=17 .. && \ - make ${BUILD_THREADS} && make install && ldconfig && \ - cd ../../abseil-cpp && mkdir build && cd build && \ + make ${BUILD_THREADS} && make install DESTDIR=/opt/dist && make install && ldconfig && \ + cd /dependencies/protobuf/third_party/abseil-cpp && mkdir build && cd build && \ cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_PREFIX_PATH=/usr/local/ -DCMAKE_INSTALL_PREFIX=/usr/local/ -DABSL_BUILD_TESTING=ON \ -DABSL_ENABLE_INSTALL=ON -DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON -DCMAKE_CXX_STANDARD=17 .. && \ - make ${BUILD_THREADS} && make install && \ + make ${BUILD_THREADS} && make install DESTDIR=/opt/dist && make install && \ cd /dependencies/protobuf && \ cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_CXX_STANDARD=17 \ -Dprotobuf_ABSL_PROVIDER=package -DCMAKE_PREFIX_PATH=/usr/local . && \ - make ${BUILD_THREADS} && make install && \ - python3 -m pip install --no-cache-dir "protobuf==4.${PROTOBUF_VERSION}" && cd /dependencies && \ - git clone --branch ${OPENCV_VERSION} https://github.com/opencv/opencv.git && \ - cd opencv && mkdir build && cd build && cmake -D BUILD_PERF_TESTS=OFF -D BUILD_TESTS=OFF .. && \ - make ${BUILD_THREADS} && make install && cd /dependencies/ && \ - git clone --branch ${VALIJSON_VERSION} https://github.com/tristanpenman/valijson.git && \ - cd valijson && cp -r include/* /usr/local/include/ && cd /dependencies && \ - curl -L -o /dependencies/${TILEDB_VERSION}.tar.gz \ + make ${BUILD_THREADS} && make install DESTDIR=/opt/dist && make install + +# DESCRIPTOR LIBRARIES +# hadolint ignore=DL3003,SC2086 +RUN git clone --branch ${FAISS_VERSION} https://github.com/facebookresearch/faiss.git /dependencies/faiss && \ + cd /dependencies/faiss && mkdir build && cd build && \ + cmake -DFAISS_ENABLE_GPU=OFF -DPython_EXECUTABLE=/usr/bin/python3 .. 
&& \ + make ${BUILD_THREADS} && make install DESTDIR=/opt/dist && make install && \ + git clone https://github.com/tonyzhang617/FLINNG.git /dependencies/FLINNG && \ + cd /dependencies/FLINNG && mkdir build && cd build && cmake .. && \ + make ${BUILD_THREADS} && make install DESTDIR=/opt/dist && make install + +# TILEDB & AWS S3 SDK +# hadolint ignore=DL3003,SC2086 +RUN curl -L -o /dependencies/${TILEDB_VERSION}.tar.gz \ https://github.com/TileDB-Inc/TileDB/archive/refs/tags/${TILEDB_VERSION}.tar.gz && \ cd /dependencies/ && tar -xvf ${TILEDB_VERSION}.tar.gz && cd TileDB-${TILEDB_VERSION} && \ mkdir build && cd build && ../bootstrap --prefix=/usr/local/ && make ${BUILD_THREADS} && \ - make install-tiledb && cd /dependencies && \ - git clone -b ${AWS_SDK_VERSION} --recurse-submodules https://github.com/aws/aws-sdk-cpp && \ - mkdir -p aws-sdk-cpp/build && cd aws-sdk-cpp/build && \ + make install-tiledb DESTDIR=/opt/dist && make install-tiledb && \ + git clone -b ${AWS_SDK_VERSION} --recurse-submodules https://github.com/aws/aws-sdk-cpp /dependencies/aws-sdk-cpp && \ + mkdir -p /dependencies/aws-sdk-cpp/build && cd /dependencies/aws-sdk-cpp/build && \ cmake .. -DCMAKE_BUILD_TYPE=Debug -DCMAKE_PREFIX_PATH=/usr/local/ -DCMAKE_INSTALL_PREFIX=/usr/local/ \ -DBUILD_ONLY="s3" -DCUSTOM_MEMORY_MANAGEMENT=OFF -DENABLE_TESTING=OFF && \ - make ${BUILD_THREADS} && make install && \ - rm -rf /dependencies /usr/local/share/doc /usr/local/share/man + make ${BUILD_THREADS} && make install DESTDIR=/opt/dist && make install + +# OPENCV +# hadolint ignore=DL3003,SC2086 +RUN git clone --branch ${OPENCV_VERSION} https://github.com/opencv/opencv.git /dependencies/opencv && \ + cd /dependencies/opencv && mkdir build && cd build && cmake -D BUILD_PERF_TESTS=OFF -D BUILD_TESTS=OFF .. && \ + make ${BUILD_THREADS} && make install DESTDIR=/opt/dist && make install + +# CLEANUP +RUN rm -rf /dependencies /usr/local/share/doc /usr/local/share/man && \ + mkdir -p /opt/dist/usr/include/x86_64-linux-gnu && \ + cp -rp /usr/include/x86_64-linux-gnu /opt/dist/usr/include/x86_64-linux-gnu + +############################################################ +# FINAL IMAGE +FROM base + +# hadolint ignore=DL3008 +RUN apt-get update -y && apt-get upgrade -y && \ + apt-get install -y --no-install-suggests --no-install-recommends --fix-missing \ + build-essential bzip2 curl g++-9 gcc-9 git javacc libarchive-tools libavcodec-dev libavformat-dev libcurl4-openssl-dev \ + libdc1394-22-dev libgoogle-glog-dev libgtk-3-dev libgtk2.0-dev libhdf5-dev libjpeg-dev libjpeg62-turbo-dev libjsoncpp-dev libopenblas-dev \ + libpng-dev librdkafka-dev libssl-dev libswscale-dev libtbb-dev libtbb2 libtiff-dev libtiff5-dev libzmq3-dev openjdk-11-jdk-headless procps python3-dev python3-pip && \ + update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 1 && \ + update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-9 1 && \ + apt-get clean && rm -rf /var/lib/apt/lists/* && \ + ln -s /usr/bin/python3 /usr/bin/python && \ + python3 -m pip install --no-cache-dir "numpy>=${NUMPY_MIN_VERSION}" "coverage>=7.3.1" "protobuf==4.${PROTOBUF_VERSION}" +COPY --from=build /opt/dist / +RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/all-libs.conf && ldconfig # VDMS WORKDIR /vdms +# hadolint ignore=DL3003,SC2086 RUN git clone -b develop --recurse-submodules https://github.com/IntelLabs/vdms.git /vdms && \ - mkdir -p /vdms/build && cd /vdms/build && cmake .. && make ${BUILD_THREADS} && \ + mkdir -p /vdms/build && cd /vdms/build && \ + cmake .. 
&& make ${BUILD_THREADS} && \
     cp /vdms/config-vdms.json /vdms/build/ && \
     echo '#!/bin/bash' > /start.sh && echo 'cd /vdms/build' >> /start.sh && \
     echo './vdms' >> /start.sh && chmod 755 /start.sh
+ENV PYTHONPATH=/vdms/client/python:${PYTHONPATH}
+HEALTHCHECK CMD echo "This is a healthcheck test." || exit 1
 CMD ["/start.sh"]
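A quick way to exercise an image built from this Dockerfile is to connect with the Python client from the host. A minimal smoke test, assuming the image was tagged `vdms:latest` (a hypothetical tag) and started with the server port published, e.g. `docker run -d -p 55555:55555 vdms:latest`:
```python
# Smoke test against a running VDMS container; port 55555 is the client default.
import vdms

db = vdms.vdms()
db.connect("localhost", 55555)
print(db.is_connected())  # True once the container is accepting connections
db.disconnect()
```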
diff --git a/include/VDMSConfigHelper.h b/include/VDMSConfigHelper.h
new file mode 100644
index 00000000..1b7a760b
--- /dev/null
+++ b/include/VDMSConfigHelper.h
@@ -0,0 +1,60 @@
+
+/**
+ * @file VDMSConfigHelper.h
+ *
+ * @section LICENSE
+ *
+ * The MIT License
+ *
+ * @copyright Copyright (c) 2023 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace VDMS {
+
+// E N U M S
+enum class StorageType { LOCAL = 0, AWS = 1, INVALID_TYPE = INT_MAX };
+
+// C O N S T A N T S
+const std::map<std::string, StorageType> storage_types_map = {
+    {"local", StorageType::LOCAL}, {"aws", StorageType::AWS}};
+
+const std::map<std::string, Aws::Utils::Logging::LogLevel> aws_log_level_map = {
+    {"off", Aws::Utils::Logging::LogLevel::Off},
+    {"fatal", Aws::Utils::Logging::LogLevel::Fatal},
+    {"error", Aws::Utils::Logging::LogLevel::Error},
+    {"warn", Aws::Utils::Logging::LogLevel::Warn},
+    {"info", Aws::Utils::Logging::LogLevel::Info},
+    {"debug", Aws::Utils::Logging::LogLevel::Debug},
+    {"trace", Aws::Utils::Logging::LogLevel::Trace}};
+
+} // namespace VDMS
diff --git a/include/vcl/DescriptorSet.h b/include/vcl/DescriptorSet.h
index 9d1017e8..ee01b6a9 100644
--- a/include/vcl/DescriptorSet.h
+++ b/include/vcl/DescriptorSet.h
@@ -42,6 +42,8 @@
 #include
 #include
 
+#include
+
 namespace VCL {
 
 enum DescriptorSetEngine {
@@ -72,7 +74,7 @@ class DescriptorSet {
   DescriptorSetEngine _eng;
   RemoteConnection *_remote;
 
-  Storage _storage = Storage::LOCAL;
+  VDMS::StorageType _storage = VDMS::StorageType::LOCAL;
 
   void write_set_info();
   void read_set_info(const std::string &set_path);
diff --git a/include/vcl/Image.h b/include/vcl/Image.h
index a872975c..5c52d34d 100644
--- a/include/vcl/Image.h
+++ b/include/vcl/Image.h
@@ -51,6 +51,8 @@
 #include
 #include
 
+#include "VDMSConfigHelper.h"
+
 namespace VCL {
 
 /**
@@ -488,7 +490,7 @@ class Image {
   // Image format and compression type
   Format _format;
   CompressionType _compress;
-  Storage _storage = Storage::LOCAL;
+  VDMS::StorageType _storage = VDMS::StorageType::LOCAL;
 
   // Full path to image
   std::string _image_id;
diff --git a/include/vcl/RemoteConnection.h b/include/vcl/RemoteConnection.h
index 642f3ef0..1b5f5f5f 100644
--- a/include/vcl/RemoteConnection.h
+++ b/include/vcl/RemoteConnection.h
@@ -72,7 +72,6 @@ class RemoteConnection {
   Aws::S3::S3Client *_aws_client;
 
   void ConfigureAws();
-  // void SetLogLevelDebug();
   void ShutdownAws();
   void write_s3(const std::string &path, std::vector data);
   void write_s3(const std::string &filename);
diff --git a/include/vcl/Video.h b/include/vcl/Video.h
index 2e0cb851..0c34424a 100644
--- a/include/vcl/Video.h
+++ b/include/vcl/Video.h
@@ -45,6 +45,7 @@
 #include "../utils/include/stats/SystemStats.h"
 
 #include "Exception.h"
+#include "VDMSConfigHelper.h"
 #include "utils.h"
 
 namespace VCL {
@@ -446,7 +447,7 @@ class Video {
 
   std::list> _operations;
 
-  Storage _storage = Storage::LOCAL;
+  VDMS::StorageType _storage = VDMS::StorageType::LOCAL;
 
   // Remote operation parameters sent by the client
   Json::Value remoteOp_params;
diff --git a/include/vcl/utils.h b/include/vcl/utils.h
index f29ceee2..10fc8ff2 100644
--- a/include/vcl/utils.h
+++ b/include/vcl/utils.h
@@ -57,8 +57,6 @@ enum class CompressionType {
   RLE = 10
 };
 
-enum class Storage { LOCAL = 0, AWS = 1 };
-
 static const struct init_rand_t {
   init_rand_t() { srand(time(NULL)); }
 } init_rand;
diff --git a/src/DescriptorsCommand.cc b/src/DescriptorsCommand.cc
index 9f0aa755..a61ce4e8 100644
--- a/src/DescriptorsCommand.cc
+++ b/src/DescriptorsCommand.cc
@@ -68,6 +68,8 @@ std::string DescriptorsCommand::get_set_path(PMGDQuery &query_tx,
   Json::Value list_arr;
   list_arr.append(VDMS_DESC_SET_PATH_PROP);
   list_arr.append(VDMS_DESC_SET_DIM_PROP);
+  list_arr.append(VDMS_DESC_SET_ENGIN_PROP);
+
   results["list"] = list_arr;
 
   bool unique = true;
@@ -104,6 +106,82 @@ bool DescriptorsCommand::check_blob_size(const std::string &blob,
                                          const long n_desc) {
   return (blob.size() / sizeof(float) / dimensions == n_desc);
 }
+// FindDescriptorSet Method
+FindDescriptorSet::FindDescriptorSet()
+    : DescriptorsCommand("FindDescriptorSet") {
+  _storage_sets = VDMSConfig::instance()->get_path_descriptors();
+  _dm->flush(); // store the descriptor set
+}
+int FindDescriptorSet::construct_protobuf(PMGDQuery &query,
+                                          const Json::Value &jsoncmd,
+                                          const std::string &blob, int grp_id,
+                                          Json::Value &error) {
+
+  const Json::Value &cmd = jsoncmd[_cmd_name];
+  Json::Value results = get_value(cmd, "results");
+
+  const std::string set_name = cmd["set"].asString();
+  const std::string set_path = _storage_sets + "/" + set_name;
+
+  Json::Value constraints, link;
+  Json::Value name_arr;
+  name_arr.append("==");
+  name_arr.append(set_name);
+  constraints[VDMS_DESC_SET_NAME_PROP] = name_arr;
+
+  Json::Value list_arr;
+  list_arr.append(VDMS_DESC_SET_NAME_PROP);
+  list_arr.append(VDMS_DESC_SET_PATH_PROP);
+  list_arr.append(VDMS_DESC_SET_DIM_PROP);
+  list_arr.append(VDMS_DESC_SET_ENGIN_PROP);
+  results["list"] = list_arr;
+  bool unique = true;
+
+  query.QueryNode(-1, VDMS_DESC_SET_TAG, Json::nullValue, constraints, results,
+                  unique, true);
+
+  return 0;
+}
+
+Json::Value FindDescriptorSet::construct_responses(
+    Json::Value &json_responses, const Json::Value &json,
+    protobufs::queryMessage &query_res, const std::string &blob) {
+
+  const Json::Value &cmd = json[_cmd_name];
+  Json::Value resp = check_responses(json_responses);
+  Json::Value ret;
+
+  auto error = [&](Json::Value &res) {
+    ret[_cmd_name] = res;
+    return ret;
+  };
+
+  if (resp["status"] != RSCommand::Success) {
+    return error(resp);
+  }
+
+  /* Get Set information using set name */
+  const std::string set_name = cmd["set"].asString();
+  const std::string set_path = _storage_sets + "/" + set_name;
+  try {
+    VCL::DescriptorSet *desc_set = _dm->get_descriptors_handler(set_path);
+    resp["status"] = RSCommand::Success;
+    ret[_cmd_name] = resp;
+
+    if (cmd.isMember("storeIndex") && cmd["storeIndex"].asBool()) {
+      desc_set->store();
+    }
+  } catch (VCL::Exception &e) {
+    print_exception(e);
+    resp["status"] = RSCommand::Error;
+    resp["info"] = "DescriptorSet details not available";
+    return error(resp);
+  }
+
+  return ret;
+}
+
+//---------------------------------------------------------------
 
 // AddDescriptorSet Methods
@@ -190,18 +268,17 @@ Json::Value AddDescriptorSet::construct_responses(
   // For now, we use the default faiss index.
   std::string eng_str = get_value(cmd, "engine", "FaissFlat");
 
-  VCL::DescriptorSetEngine eng;
   if (eng_str == "FaissFlat")
-    eng = VCL::FaissFlat;
+    _eng = VCL::FaissFlat;
   else if (eng_str == "FaissIVFFlat")
-    eng = VCL::FaissIVFFlat;
+    _eng = VCL::FaissIVFFlat;
   else if (eng_str == "TileDBDense")
-    eng = VCL::TileDBDense;
+    _eng = VCL::TileDBDense;
   else if (eng_str == "TileDBSparse")
-    eng = VCL::TileDBSparse;
+    _eng = VCL::TileDBSparse;
   else if (eng_str == "Flinng")
-    eng = VCL::Flinng;
+    _eng = VCL::Flinng;
   else
     throw ExceptionCommand(DescriptorSetError, "Engine not supported");
 
@@ -212,7 +289,7 @@
     param = new VCL::DescriptorParams(_flinng_num_rows, _flinng_cells_per_row,
                                       _flinng_num_hash_tables,
                                       _flinng_hashes_per_table);
 
-  VCL::DescriptorSet desc_set(desc_set_path, dimensions, eng, metric, param);
+  VCL::DescriptorSet desc_set(desc_set_path, dimensions, _eng, metric, param);
 
   if (_use_aws_storage) {
     VCL::RemoteConnection *connection = new VCL::RemoteConnection();
@@ -266,6 +343,7 @@ long AddDescriptor::insert_descriptor(const std::string &blob,
     long label_id = desc_set->get_label_id(label);
     long *label_ptr = &label_id;
     id_first = desc_set->add((float *)blob.data(), 1, label_ptr);
+
   } else {
     id_first = desc_set->add((float *)blob.data(), 1);
   }
@@ -319,7 +397,7 @@ int AddDescriptor::construct_protobuf(PMGDQuery &query,
   props[VDMS_DESC_LABEL_PROP] = label;
 
   int dimensions;
-  std::string set_path = get_set_path(query, set_name, dimensions);
+  const std::string set_path = get_set_path(query, set_name, dimensions);
 
   if (set_path.empty()) {
     error["info"] = "Set " + set_name + " not found";
@@ -423,7 +501,7 @@ int ClassifyDescriptor::construct_protobuf(PMGDQuery &query,
 
   if (set_path.empty()) {
     error["status"] = RSCommand::Error;
-    error["info"] = "DescritorSet Not Found!";
+    error["info"] = "DescriptorSet Not Found!";
     return -1;
   }
@@ -445,6 +523,7 @@ int ClassifyDescriptor::construct_protobuf(PMGDQuery &query,
   // Query set node
   query.QueryNode(get_value(cmd, "_ref", -1), VDMS_DESC_SET_TAG, link,
                   constraints, results, unique);
+  _dm->flush();
   return 0;
 }
@@ -536,7 +615,7 @@ int FindDescriptor::construct_protobuf(PMGDQuery &query,
 
   if (set_path.empty()) {
     cp_result["status"] = RSCommand::Error;
-    cp_result["info"] = "DescritorSet Not Found!";
+    cp_result["info"] = "DescriptorSet Not Found!";
     return -1;
   }
diff --git a/src/DescriptorsCommand.h b/src/DescriptorsCommand.h
index 30be90fc..36d120e8 100644
--- a/src/DescriptorsCommand.h
+++ b/src/DescriptorsCommand.h
@@ -39,7 +39,7 @@
 #include
 
 #include "DescriptorsManager.h"
-#include "QueryHandler.h" // to provide the database connection
+#include "QueryHandlerPMGD.h" // to provide the database connection
 #include "tbb/concurrent_unordered_map.h"
 
 namespace VDMS {
@@ -50,6 +50,7 @@
 typedef std::pair<std::vector<long>, std::vector<float>> IDDistancePair;
class DescriptorsCommand : public RSCommand { protected: DescriptorsManager *_dm; + VCL::DescriptorSetEngine _eng; // IDDistancePair is a pointer so that we can free its content // without having to use erase methods, which are not lock free @@ -78,6 +79,21 @@ class DescriptorsCommand : public RSCommand { const std::string &blob) = 0; }; +class FindDescriptorSet : public DescriptorsCommand { + std::string _storage_sets; + +public: + FindDescriptorSet(); + int construct_protobuf(PMGDQuery &tx, const Json::Value &root, + const std::string &blob, int grp_id, + Json::Value &error); + + Json::Value construct_responses(Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); +}; + class AddDescriptorSet : public DescriptorsCommand { std::string _storage_sets; uint64_t _flinng_num_rows; diff --git a/src/QueryHandler.cc b/src/QueryHandler.cc deleted file mode 100644 index 34d15073..00000000 --- a/src/QueryHandler.cc +++ /dev/null @@ -1,576 +0,0 @@ -/** - * @file QueryHandler.cc - * - * @section LICENSE - * - * The MIT License - * - * @copyright Copyright (c) 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- * - */ - -#include "QueryHandler.h" -#include -#include -#include - -#include "BlobCommand.h" -#include "BoundingBoxCommand.h" -#include "DescriptorsCommand.h" -#include "ImageCommand.h" -#include "VideoCommand.h" - -#include "ExceptionsCommand.h" - -#include "PMGDQuery.h" -#include "QueryMessage.h" -#include "pmgd.h" -#include "util.h" - -#include "APISchema.h" -#include -#include -#include -#include - -using namespace VDMS; - -std::unordered_map QueryHandler::_rs_cmds; -valijson::Schema *QueryHandler::_schema = new valijson::Schema; - -void QueryHandler::init() { - DescriptorsManager::init(); - - _rs_cmds["AddEntity"] = new AddEntity(); - _rs_cmds["UpdateEntity"] = new UpdateEntity(); - _rs_cmds["FindEntity"] = new FindEntity(); - - _rs_cmds["AddConnection"] = new AddConnection(); - _rs_cmds["UpdateConnection"] = new UpdateConnection(); - _rs_cmds["FindConnection"] = new FindConnection(); - - _rs_cmds["AddImage"] = new AddImage(); - _rs_cmds["UpdateImage"] = new UpdateImage(); - _rs_cmds["FindImage"] = new FindImage(); - _rs_cmds["DeleteExpired"] = new DeleteExpired(); - - _rs_cmds["AddDescriptorSet"] = new AddDescriptorSet(); - _rs_cmds["AddDescriptor"] = new AddDescriptor(); - _rs_cmds["FindDescriptor"] = new FindDescriptor(); - _rs_cmds["ClassifyDescriptor"] = new ClassifyDescriptor(); - - _rs_cmds["AddBoundingBox"] = new AddBoundingBox(); - _rs_cmds["UpdateBoundingBox"] = new UpdateBoundingBox(); - _rs_cmds["FindBoundingBox"] = new FindBoundingBox(); - - _rs_cmds["AddVideo"] = new AddVideo(); - _rs_cmds["UpdateVideo"] = new UpdateVideo(); - _rs_cmds["FindVideo"] = new FindVideo(); - _rs_cmds["FindFrames"] = new FindFrames(); - - _rs_cmds["AddBlob"] = new AddBlob(); - _rs_cmds["UpdateBlob"] = new UpdateBlob(); - _rs_cmds["FindBlob"] = new FindBlob(); - - // Load the string containing the schema (api_schema/APISchema.h) - Json::Reader reader; - Json::Value api_schema; - bool parseSuccess = reader.parse(schema_json.c_str(), api_schema); - if (!parseSuccess) { - std::cerr << "Failed to parse API reference schema." << std::endl; - std::cerr << "PANIC! Aborting." << std::endl; - exit(0); - } - - // Parse the json schema into an internal schema format - valijson::SchemaParser parser; - valijson::adapters::JsonCppAdapter schemaDocumentAdapter(api_schema); - try { - parser.populateSchema(schemaDocumentAdapter, *_schema); - } catch (std::exception &e) { - std::cerr << "Failed to load schema: " << e.what() << std::endl; - std::cerr << "PANIC! Aborting." 
<< std::endl; - exit(0); - } -} - -QueryHandler::QueryHandler() - : _pmgd_qh(), _validator(valijson::Validator::kWeakTypes), - _autodelete_init(false), _autoreplicate_init(false) -#ifdef CHRONO_TIMING - , - ch_tx_total("ch_tx_total"), ch_tx_query("ch_tx_query"), - ch_tx_send("ch_tx_send") -#endif -{ -} - -void QueryHandler::process_connection(comm::Connection *c) { - QueryMessage msgs(c); - - try { - while (true) { - protobufs::queryMessage response; - protobufs::queryMessage query = msgs.get_query(); - CHRONO_TIC(ch_tx_total); - - CHRONO_TIC(ch_tx_query); - process_query(query, response); - CHRONO_TAC(ch_tx_query); - - CHRONO_TIC(ch_tx_send); - msgs.send_response(response); - CHRONO_TAC(ch_tx_send); - - CHRONO_TAC(ch_tx_total); - CHRONO_PRINT_LAST_MS(ch_tx_total); - CHRONO_PRINT_LAST_MS(ch_tx_query); - CHRONO_PRINT_LAST_MS(ch_tx_send); - } - } catch (comm::ExceptionComm e) { - print_exception(e); - } -} - -bool QueryHandler::syntax_checker(const Json::Value &root, Json::Value &error) { - valijson::ValidationResults results; - valijson::adapters::JsonCppAdapter user_query(root); - if (!_validator.validate(*_schema, user_query, &results)) { - std::cerr << "API validation failed for:" << std::endl; - std::cerr << root.toStyledString() << std::endl; - - // Will attempt to find the simple error - // To avoid valijson dump - for (int j = 0; j < root.size(); j++) { - const Json::Value &query = root[j]; - if (query.getMemberNames().size() != 1) { - error["info"] = "Error: Only one command per element allowed"; - return false; - } - - const std::string cmd_str = query.getMemberNames()[0]; - auto it = _rs_cmds.find(cmd_str); - if (it == _rs_cmds.end()) { - error["info"] = cmd_str + ": Command not found!"; - return false; - } - } - - valijson::ValidationResults::Error va_error; - unsigned int errorNum = 1; - std::stringstream str_error; - while (results.popError(va_error)) { - std::string context; - std::vector::iterator itr = va_error.context.begin(); - for (; itr != va_error.context.end(); itr++) { - context += *itr; - } - - str_error << "Error #" << errorNum << std::endl - << " context: " << context << std::endl - << " desc: " << va_error.description << std::endl; - ++errorNum; - } - std::cerr << str_error.str(); - error["info"] = str_error.str(); - return false; - } - - for (auto &cmdTop : root) { - const std::string cmd_str = cmdTop.getMemberNames()[0]; - auto &cmd = cmdTop[cmd_str]; - if (cmd.isMember("constraints")) { - for (auto &member : cmd["constraints"].getMemberNames()) { - if (!cmd["constraints"][member].isArray()) { - error["info"] = - "Constraint for property '" + member + "' must be an array"; - return false; - } - auto size = cmd["constraints"][member].size(); - if (size != 2 && size != 4) { - error["info"] = "Constraint for property '" + member + - "' must be an array of size 2 or 4"; - return false; - } - } - } - } - - return true; -} - -int QueryHandler::parse_commands(const protobufs::queryMessage &proto_query, - Json::Value &root) { - Json::Reader reader; - const std::string commands = proto_query.json(); - - try { - bool parseSuccess = reader.parse(commands.c_str(), root); - - if (!parseSuccess) { - root["info"] = "Error parsing the query, ill formed JSON"; - root["status"] = RSCommand::Error; - return -1; - } - - Json::Value error; - if (!syntax_checker(root, error)) { - root = error; - root["status"] = RSCommand::Error; - return -1; - } - - unsigned blob_counter = 0; - for (int j = 0; j < root.size(); j++) { - const Json::Value &query = root[j]; - 
assert(query.getMemberNames().size() == 1); - std::string cmd = query.getMemberNames()[0]; - - if (_rs_cmds[cmd]->need_blob(query)) { - blob_counter++; - } - } - - if (blob_counter != proto_query.blobs().size()) { - root = error; - root["info"] = std::string( - "Expected blobs: " + std::to_string(blob_counter) + - ". Received blobs: " + std::to_string(proto_query.blobs().size())); - root["status"] = RSCommand::Error; - std::cerr << "Not enough blobs!" << std::endl; - return -1; - } - - } catch (Json::Exception const &) { - root["info"] = "Json Exception at Parsing"; - root["status"] = RSCommand::Error; - return -1; - } - - return 0; -} - -// TODO create a better mechanism to cleanup queries that -// includes feature vectors and user-defined blobs -// For now, we do it for videos/images as a starting point. -void QueryHandler::cleanup_query(const std::vector &images, - const std::vector &videos) { - for (auto &img_path : images) { - VCL::Image img(img_path); - img.delete_image(); - } - - for (auto &vid_path : videos) { - VCL::Video vid(vid_path); - vid.delete_video(); - } -} - -void QueryHandler::process_query(protobufs::queryMessage &proto_query, - protobufs::queryMessage &proto_res) { - Json::FastWriter fastWriter; - - Json::Value root; - Json::Value exception_error; - std::stringstream error_msg; - auto exception_handler = [&]() { - // When exception is catched, we return the message. - std::cerr << "Failed Query: " << std::endl; - std::cerr << root << std::endl; - std::cerr << error_msg.str(); - std::cerr << "End Failed Query: " << std::endl; - exception_error["info"] = error_msg.str(); - exception_error["status"] = RSCommand::Error; - Json::Value response; - response.append(exception_error); - proto_res.set_json(fastWriter.write(response)); - }; - - try { - Json::Value json_responses; - - Json::Value cmd_result; - Json::Value cmd_current; - std::vector images_log; - std::vector videos_log; - std::vector construct_results; - - auto error = [&](Json::Value &res, Json::Value &failed_command) { - cleanup_query(images_log, videos_log); - res["FailedCommand"] = failed_command; - json_responses.clear(); - json_responses.append(res); - proto_res.clear_blobs(); - proto_res.set_json(fastWriter.write(json_responses)); - Json::StyledWriter w; - std::cerr << w.write(json_responses); - }; - - if (parse_commands(proto_query, root) != 0) { - cmd_current = "Transaction"; - error(root, cmd_current); - return; - } - - PMGDQuery pmgd_query(_pmgd_qh); - int blob_count = 0; - - // iterate over the list of the queries - for (int j = 0; j < root.size(); j++) { - const Json::Value &query = root[j]; - std::string cmd = query.getMemberNames()[0]; - - int group_count = pmgd_query.add_group(); - - RSCommand *rscmd = _rs_cmds[cmd]; - - const std::string &blob = - rscmd->need_blob(query) ? 
proto_query.blobs(blob_count++) : ""; - - int ret_code = rscmd->construct_protobuf(pmgd_query, query, blob, - group_count, cmd_result); - - if (cmd_result.isMember("image_added")) { - images_log.push_back(cmd_result["image_added"].asString()); - } - if (cmd_result.isMember("video_added")) { - videos_log.push_back(cmd_result["video_added"].asString()); - } - - if (ret_code != 0) { - error(cmd_result, root[j]); - return; - } - - construct_results.push_back(cmd_result); - } - - Json::Value &tx_responses = pmgd_query.run(_autodelete_init); - - if (!tx_responses.isArray() || tx_responses.size() != root.size()) { - Json::StyledWriter writer; - std::cerr << "PMGD Response:" << std::endl; - std::cerr << writer.write(tx_responses) << std::endl; - - std::string tx_error_msg("Failed PMGD Transaction"); - if (!tx_responses.isArray() && tx_responses.isMember("info")) { - tx_error_msg += ": " + tx_responses["info"].asString(); - } - - cmd_result["status"] = RSCommand::Error; - cmd_result["info"] = tx_error_msg; - - cmd_current = "Transaction"; - error(cmd_result, cmd_current); - return; - } else { - blob_count = 0; - for (int j = 0; j < root.size(); j++) { - Json::Value &query = root[j]; - std::string cmd = query.getMemberNames()[0]; - - RSCommand *rscmd = _rs_cmds[cmd]; - - const std::string &blob = - rscmd->need_blob(query) ? proto_query.blobs(blob_count++) : ""; - - query["cp_result"] = construct_results[j]; - cmd_result = - rscmd->construct_responses(tx_responses[j], query, proto_res, blob); - - // This is for error handling - if (cmd_result.isMember("status")) { - int status = cmd_result["status"].asInt(); - if (status != RSCommand::Success || status != RSCommand::Empty || - status != RSCommand::Exists) { - error(cmd_result, root[j]); - return; - } - } - json_responses.append(cmd_result); - } - } - proto_res.set_json(fastWriter.write(json_responses)); - _pmgd_qh.cleanup_files(); - - } catch (VCL::Exception &e) { - print_exception(e); - error_msg << "Internal Server Error: VCL Exception at QH" << std::endl; - exception_handler(); - } catch (PMGD::Exception &e) { - print_exception(e); - error_msg << "Internal Server Error: PMGD Exception at QH" << std::endl; - exception_handler(); - } catch (ExceptionCommand &e) { - print_exception(e); - error_msg << "Internal Server Error: Command Exception at QH" << std::endl; - exception_handler(); - } catch (Json::Exception const &e) { - // In case of error on the last fastWriter - error_msg << "Internal Server Error: Json Exception: " << e.what() - << std::endl; - exception_handler(); - // } catch (google::protobuf::FatalException &e) { - // // Need to be carefull with this, may lead to memory leak. - // // Protoubuf is not exception safe. - // error_msg << "Internal Server Error: Protobuf Exception: " << e.what() - // << std::endl; - // exception_handler(); - } catch (const std::invalid_argument &e) { - error_msg << "FATAL: Invalid argument: " << e.what() << std::endl; - exception_handler(); - } catch (const std::exception &e) { - error_msg << "std Exception: " << e.what() << std::endl; - exception_handler(); - } catch (...) 
{ - error_msg << "Unknown Exception" << std::endl; - exception_handler(); - } -} - -void QueryHandler::regular_run_autoreplicate( - ReplicationConfig &replicate_settings) { - std::string command = "bsdtar cvfz "; - std::string name; - std::ostringstream oss; - Json::Value config_file; - std::ofstream file_id; - name.clear(); - auto t = std::time(nullptr); - auto tm = *std::localtime(&t); - oss << asctime(&tm); - name = oss.str(); - name.erase(remove(name.begin(), name.end(), ' '), name.end()); - name.erase(std::remove(name.begin(), name.end(), '\n'), name.end()); - std::string full_name = replicate_settings.backup_path + "/" + name; - - command = command + " " + full_name + ".tar.gz " + - replicate_settings.db_path; // current_date_time - - system(command.c_str()); - - if (replicate_settings.server_port != 0) { - config_file["port"] = replicate_settings.server_port; - } - - if (!full_name.empty()) { - config_file["db_root_path"] = full_name; - } - - if (replicate_settings.autodelete_interval > 0) { - config_file["autodelete_interval"] = - replicate_settings - .autodelete_interval; // expired data removed daily (86400 secs) - } - - if (replicate_settings.expiration_time > 0) { - config_file["expiration_time"] = replicate_settings.expiration_time; - } - - config_file["more-info"] = "github.com/IntelLabs/vdms"; - - if (!replicate_settings.replication_time.empty()) { - config_file["autoreplicate_time"] = replicate_settings.replication_time; - } - - if (!replicate_settings.autoreplication_unit.empty()) { - config_file["unit"] = replicate_settings.autoreplication_unit; - } - - if (replicate_settings.autoreplicate_interval > 0) { - config_file["autoreplicate_interval"] = - replicate_settings.autoreplicate_interval; - } - - if (replicate_settings.max_simultaneous_clients > 0) { - config_file["max_simultaneous_clients"] = - replicate_settings.max_simultaneous_clients; - } - - if (!replicate_settings.backup_flag.empty()) { - config_file["backup_flag"] = replicate_settings.backup_flag; - } - if (!replicate_settings.backup_flag.empty()) { - config_file["backup_path"] = replicate_settings.backup_path; - } - if (!replicate_settings.backup_flag.empty()) { - config_file["images_path"] = replicate_settings.images_path; - } - if (!replicate_settings.backup_flag.empty()) { - config_file["blobs_path"] = replicate_settings.blobs_path; - } - if (!replicate_settings.backup_flag.empty()) { - config_file["descriptor_path"] = replicate_settings.descriptor_path; - } - if (!replicate_settings.backup_flag.empty()) { - config_file["pmgd_num_allocators"] = replicate_settings.pmgd_num_allocators; - } - std::cout << config_file << std::endl; - // write the configuration file - std::string config_file_name = full_name + ".json"; - file_id.open(config_file_name.c_str(), std::ios::out); - file_id << config_file << std::endl; - file_id.close(); - - command = "bsdtar cvfz "; - oss.str(std::string()); - name.clear(); - config_file.clear(); -} -void QueryHandler::reset_autoreplicate_init_flag() { - _autoreplicate_init = true; -} -void QueryHandler::set_autoreplicate_init_flag() { - _autoreplicate_init = false; -} -void QueryHandler::reset_autodelete_init_flag() { _autodelete_init = false; } - -void QueryHandler::set_autodelete_init_flag() { _autodelete_init = true; } - -void QueryHandler::regular_run_autodelete() { - std::string *json_string = new std::string( - "[{\"DeleteExpired\": {\"results\": {\"list\": [\"_expiration\"]}}}]"); - protobufs::queryMessage response; - protobufs::queryMessage query; - 
query.set_json(json_string->c_str()); - process_query(query, response); - delete json_string; -} - -void QueryHandler::build_autodelete_queue() { - std::string *json_string = new std::string( - "[{\"FindImage\": {\"results\": {\"list\": [\"_expiration\"]}, " - "\"constraints\": {\"_expiration\": [\">\", 0]}}}, {\"FindVideo\": " - "{\"results\": {\"list\": [\"_expiration\"]}, \"constraints\": " - "{\"_expiration\": [\">\", 0]}}}], {\"FindFrames\": {\"results\": " - "{\"list\": [\"_expiration\"]}, \"constraints\": {\"_expiration\": " - "[\">\", 0]}}}], {\"FindDescriptor\": {\"results\": {\"list\": " - "[\"_expiration\"]}, \"constraints\": {\"_expiration\": [\">\", 0]}}}], " - "{\"FindEntity\": {\"results\": {\"list\": [\"_expiration\"]}, " - "\"constraints\": {\"_expiration\": [\">\", 0]}}}"); - protobufs::queryMessage response; - protobufs::queryMessage query; - query.set_json(json_string->c_str()); - process_query(query, response); - delete json_string; -} diff --git a/src/QueryHandler.h b/src/QueryHandler.h deleted file mode 100644 index 61ada1fd..00000000 --- a/src/QueryHandler.h +++ /dev/null @@ -1,95 +0,0 @@ -/** - * @file QueryHandler.h - * - * @section LICENSE - * - * The MIT License - * - * @copyright Copyright (c) 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - */ - -#pragma once -#include -#include -#include -#include - -#include "PMGDQueryHandler.h" // to provide the database connection -#include "RSCommand.h" -#include "Server.h" -#include "chrono/Chrono.h" - -// Json parsing files -#include -#include -#include - -namespace VDMS { - -typedef ::google::protobuf::RepeatedPtrField BlobArray; - -// Instance created per worker thread to handle all transactions on a given -// connection. 
-class QueryHandler {
-  friend class QueryHandlerTester;
-
-  static std::unordered_map _rs_cmds;
-  PMGDQueryHandler _pmgd_qh;
-  bool _autodelete_init;
-  bool _autoreplicate_init;
-
-  bool syntax_checker(const Json::Value &root, Json::Value &error);
-  int parse_commands(const protobufs::queryMessage &proto_query,
-                     Json::Value &root);
-  void cleanup_query(const std::vector &images,
-                     const std::vector &videos);
-
-  void process_query(protobufs::queryMessage &proto_query,
-                     protobufs::queryMessage &response);
-
-  // valijson
-  valijson::Validator _validator;
-  static valijson::Schema *_schema;
-
-#ifdef CHRONO_TIMING
-  ChronoCpu ch_tx_total;
-  ChronoCpu ch_tx_query;
-  ChronoCpu ch_tx_send;
-#endif
-
-public:
-  static void init();
-
-  QueryHandler();
-
-  void process_connection(comm::Connection *c);
-  void reset_autodelete_init_flag();
-  void set_autodelete_init_flag();
-  void regular_run_autodelete();
-  void build_autodelete_queue();
-  void set_autoreplicate_init_flag();
-  void reset_autoreplicate_init_flag();
-  void regular_run_autoreplicate(ReplicationConfig &);
-};
-} // namespace VDMS
diff --git a/src/QueryHandlerExample.cc b/src/QueryHandlerExample.cc
index 637cdd9b..ebf895ad 100644
--- a/src/QueryHandlerExample.cc
+++ b/src/QueryHandlerExample.cc
@@ -29,7 +29,6 @@
  *
  */
 
-#include "QueryHandler.h"
 #include
 #include
 #include
diff --git a/src/QueryHandlerPMGD.cc b/src/QueryHandlerPMGD.cc
index 1e35340f..90b4fee9 100644
--- a/src/QueryHandlerPMGD.cc
+++ b/src/QueryHandlerPMGD.cc
@@ -74,6 +74,7 @@ void QueryHandlerPMGD::init() {
   _rs_cmds["DeleteExpired"] = new DeleteExpired();
 
   _rs_cmds["AddDescriptorSet"] = new AddDescriptorSet();
+  _rs_cmds["FindDescriptorSet"] = new FindDescriptorSet();
   _rs_cmds["AddDescriptor"] = new AddDescriptor();
   _rs_cmds["FindDescriptor"] = new FindDescriptor();
   _rs_cmds["ClassifyDescriptor"] = new ClassifyDescriptor();
@@ -410,6 +411,9 @@ void QueryHandlerPMGD::process_query(protobufs::queryMessage &proto_query,
 
 void QueryHandlerPMGD::regular_run_autoreplicate(
     ReplicationConfig &replicate_settings) {
+
+  DescriptorsManager::instance()
+      ->flush(); // store all descriptor sets before each backup operation
   std::string command = "bsdtar cvfz ";
   std::string name;
   std::ostringstream oss;
diff --git a/src/RSCommand.cc b/src/RSCommand.cc
index 7acee5a7..688e9f62 100644
--- a/src/RSCommand.cc
+++ b/src/RSCommand.cc
@@ -36,7 +36,7 @@
 #include
 
 #include "ExceptionsCommand.h"
-#include "QueryHandler.h"
+#include "QueryHandlerPMGD.h"
 #include "VDMSConfig.h"
 #include "defines.h"
 #include "vcl/VCL.h"
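With `FindDescriptorSet` registered above, a transaction can look up a descriptor set by name. A hedged sketch of client-side usage: the `set` and `storeIndex` fields come from the FindDescriptorSet implementation in this diff, the set name is hypothetical, and the transaction/list form follows existing Python client usage:
```python
# Hypothetical FindDescriptorSet transaction; adjust names to your schema.
import vdms

db = vdms.vdms()
db.connect("localhost", 55555)

query = [{
    "FindDescriptorSet": {
        "set": "my_descriptor_set",  # placeholder; a set created via AddDescriptorSet
        "storeIndex": True,          # also persist the set's index (see construct_responses)
    }
}]
response, blobs = db.query(query)
print(response)
db.disconnect()
```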
diff --git a/src/VDMSConfig.cc b/src/VDMSConfig.cc
index 9d6d442b..d61e72d3 100644
--- a/src/VDMSConfig.cc
+++ b/src/VDMSConfig.cc
@@ -34,6 +34,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -56,6 +57,12 @@
 #define DEFAULT_STORAGE_TYPE "local"
 #define DEFAULT_BUCKET_NAME "vdms_bucket"
 
+// C O N S T A N T S
+const std::string KEY_NOT_FOUND = "KEY_NOT_FOUND";
+const std::string DEFAULT_ENDPOINT = "http://127.0.0.1:9000";
+const std::string DEFAULT_AWS_LOG_LEVEL = "off";
+const bool DEFAULT_USE_ENDPOINT = false;
+
 using namespace VDMS;
 
 VDMSConfig *VDMSConfig::cfg;
@@ -106,6 +113,10 @@ std::string VDMSConfig::get_string_value(std::string val, std::string def) {
   return json_config.get(val, def).asString();
 }
 
+bool VDMSConfig::get_bool_value(std::string val, bool def) {
+  return json_config.get(val, def).asBool();
+}
+
 // This is a function that creates a directory structure with DIRECTORY_LAYERS
 // levels with each layer with DIRECTORIES_PER_LAYER ^ n directories. This
 // function is recursive so will call itself to expand each directory level.
@@ -247,17 +258,96 @@ void VDMSConfig::build_dirs() {
   check_or_create(path_descriptors);
 
   // TMP
-  path_tmp = "/tmp/" + std::string(DEFAULT_PATH_TMP);
+  path_tmp = std::string(DEFAULT_PATH_TMP);
   path_tmp = get_string_value(PARAM_DB_TMP, path_tmp);
   check_or_create(path_tmp);
   create_directory_layer(&directory_list, path_tmp);
 
+  // use_endpoint
+  use_endpoint = get_bool_value(PARAM_USE_ENDPOINT, DEFAULT_USE_ENDPOINT);
+
   // get storage type, set use_aws flag
-  storage_type = get_string_value(PARAM_STORAGE_TYPE, DEFAULT_STORAGE_TYPE);
-  if (storage_type == DEFAULT_STORAGE_TYPE) {
-    aws_flag = false;
+  std::string storage_type_value =
+      get_string_value(PARAM_STORAGE_TYPE, DEFAULT_STORAGE_TYPE);
+  transform(storage_type_value.begin(), storage_type_value.end(),
+            storage_type_value.begin(), ::tolower);
+
+  storage_type = StorageType::INVALID_TYPE;
+  if (storage_types_map.find(storage_type_value) != storage_types_map.end()) {
+    storage_type = storage_types_map.at(storage_type_value);
+  }
+
+  std::string value = "";
+  aws_flag = false;
+  if (storage_type != StorageType::INVALID_TYPE) {
+    switch (storage_type) {
+    case StorageType::AWS: {
+      aws_flag = true;
+      aws_bucket_name =
+          get_string_value(PARAM_BUCKET_NAME, DEFAULT_BUCKET_NAME);
+      // if use_endpoint value is true then check for the endpoint value
+      if (use_endpoint) {
+        // minio endpoint format: "http://127.0.0.1:9000"
+        if (exists_key(PARAM_ENDPOINT_OVERRIDE)) {
+          value = get_string_value(PARAM_ENDPOINT_OVERRIDE, KEY_NOT_FOUND);
+          endpoint_override = std::optional<std::string>{value};
+        } else {
+          // If use_endpoint value is true but the "endpoint_override" is not
+          // specified in the config file then it uses DEFAULT_ENDPOINT
+          // as default endpoint value
+          endpoint_override = std::optional<std::string>{DEFAULT_ENDPOINT};
+        }
+      }
+      break;
+    }
+    case StorageType::LOCAL: {
+      aws_flag = false;
+      break;
+    }
+    default:
+      aws_flag = false;
+    }
+  }
+
+  // proxy_host
+  if (exists_key(PARAM_PROXY_HOST)) {
+    value = get_string_value(PARAM_PROXY_HOST, KEY_NOT_FOUND);
+    proxy_host = std::optional<std::string>{value};
   } else {
-    aws_flag = true;
-    aws_bucket_name = get_string_value(PARAM_BUCKET_NAME, DEFAULT_BUCKET_NAME);
+    proxy_host = std::nullopt;
   }
+
+  // proxy_port
+  if (exists_key(PARAM_PROXY_PORT)) {
+    value = get_string_value(PARAM_PROXY_PORT, KEY_NOT_FOUND);
+    proxy_port = std::optional<int>{stoi(value)};
+  } else {
+    proxy_port = std::nullopt;
+  }
+
+  // proxy_scheme [http|https]
+  if (exists_key(PARAM_PROXY_SCHEME)) {
+    value = get_string_value(PARAM_PROXY_SCHEME, KEY_NOT_FOUND);
+    transform(value.begin(), value.end(), value.begin(), ::tolower);
+
+    proxy_scheme = std::optional<std::string>{value};
+  } else {
+    proxy_scheme = std::nullopt;
+  }
+
+  // AWS Log Level
+  std::string aws_log_level_value =
+      get_string_value(PARAM_AWS_LOG_LEVEL, DEFAULT_AWS_LOG_LEVEL);
+
+  transform(aws_log_level_value.begin(), aws_log_level_value.end(),
+            aws_log_level_value.begin(), ::tolower);
+
+  aws_log_level = Aws::Utils::Logging::LogLevel::Off;
+  if (aws_log_level_map.find(aws_log_level_value) != aws_log_level_map.end()) {
+    aws_log_level = aws_log_level_map.at(aws_log_level_value);
+  }
+}
+
+bool VDMSConfig::exists_key(const std::string &key) {
+  return (json_config[key] != Json::nullValue);
+}
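For reference, the AWS-related keys parsed above combine as follows in `config-vdms.json`. A hedged example assembled in Python; the key names match the PARAM_* constants in this diff, but the values are illustrative:
```python
# Illustrative AWS/MinIO config; values are examples, not verified defaults.
import json

config = {
    "db_root_path": "db",
    "storage_type": "aws",                         # selects StorageType::AWS
    "bucket_name": "minio-bucket",
    "use_endpoint": True,                          # route S3 calls to an endpoint override
    "endpoint_override": "http://127.0.0.1:9000",  # falls back to this default when omitted
    "aws_log_level": "debug",                      # off|fatal|error|warn|info|debug|trace
    "proxy_host": "proxy.example.com",             # hypothetical proxy
    "proxy_port": "8080",                          # read as a string, converted with stoi()
    "proxy_scheme": "http",
}

with open("config-vdms.json", "w") as f:
    json.dump(config, f, indent=2)
```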
"VDMSConfigHelper.h" + // Parameters in the JSON config file #define PARAM_DB_ROOT "db_root_path" #define PARAM_DB_PMGD "pmgd_path" @@ -74,6 +79,14 @@ #define PARAM_PMGD_NUM_ALLOCATORS "pmgd_num_allocators" #define DEFAULT_PMGD_NUM_ALLOCATORS 1 +// C O N S T A N T S +const std::string PARAM_ENDPOINT_OVERRIDE = "endpoint_override"; +const std::string PARAM_PROXY_HOST = "proxy_host"; +const std::string PARAM_PROXY_PORT = "proxy_port"; +const std::string PARAM_PROXY_SCHEME = "proxy_scheme"; +const std::string PARAM_USE_ENDPOINT = "use_endpoint"; +const std::string PARAM_AWS_LOG_LEVEL = "aws_log_level"; + namespace VDMS { class VDMSConfig { @@ -99,10 +112,17 @@ class VDMSConfig { std::string path_videos; std::string path_descriptors; std::string path_tmp; - std::string storage_type; + StorageType storage_type; bool aws_flag; // use aws flag std::string aws_bucket_name; // aws bucket name + bool use_endpoint; // Use Mocked S3 server or real AWS S3 + + std::optional endpoint_override; + std::optional proxy_host; + std::optional proxy_port; + std::optional proxy_scheme; + Aws::Utils::Logging::LogLevel aws_log_level; VDMSConfig(std::string config_file); @@ -119,6 +139,8 @@ class VDMSConfig { public: int get_int_value(std::string val, int def); std::string get_string_value(std::string val, std::string def); + bool get_bool_value(std::string val, bool def); + bool exists_key(const std::string &key); const std::string &get_path_root() { return path_root; } const std::string &get_path_pmgd() { return path_pmgd; } const std::string &get_path_jpg() { return path_jpg; } @@ -129,9 +151,20 @@ class VDMSConfig { const std::string &get_path_videos() { return path_videos; } const std::string &get_path_descriptors() { return path_descriptors; } const std::string &get_path_tmp() { return path_tmp; } - const std::string &get_storage_type() { return storage_type; } + const StorageType &get_storage_type() { return storage_type; } const std::string &get_bucket_name() { return aws_bucket_name; } - const bool get_aws_flag() { return aws_flag; } + const bool &get_aws_flag() { return aws_flag; } + + std::optional get_endpoint_override() { + return endpoint_override; + } + const std::optional &get_proxy_host() { return proxy_host; } + const std::optional &get_proxy_port() { return proxy_port; } + const std::optional &get_proxy_scheme() { return proxy_scheme; } + const bool &get_use_endpoint() { return use_endpoint; } + const Aws::Utils::Logging::LogLevel get_aws_log_level() & { + return aws_log_level; + } }; }; // namespace VDMS diff --git a/src/vcl/CMakeLists.txt b/src/vcl/CMakeLists.txt index 36e719c7..a99080e3 100644 --- a/src/vcl/CMakeLists.txt +++ b/src/vcl/CMakeLists.txt @@ -8,6 +8,7 @@ find_package( OpenCV REQUIRED ) include_directories(../../include . 
 add_library(vcl SHARED
+    ../VDMSConfig.cc
     DescriptorSet.cc
     DescriptorSetData.cc
     Exception.cc
diff --git a/src/vcl/DescriptorSet.cc b/src/vcl/DescriptorSet.cc
index a4524546..e4ee990e 100644
--- a/src/vcl/DescriptorSet.cc
+++ b/src/vcl/DescriptorSet.cc
@@ -180,7 +180,7 @@ void DescriptorSet::store() {
   // grab the descriptor files from local storage, upload them, delete the local
   // copies not deleting the local copies currently to resolve concurrency
   // issues
-  if (_storage == Storage::AWS) {
+  if (_storage == VDMS::StorageType::AWS) {
     std::string dir_path = _set->get_path();
     std::vector<std::string> filenames;
@@ -310,6 +310,6 @@ void DescriptorSet::set_connection(RemoteConnection *remote) {
   }

   _remote = remote;
-  _storage = Storage::AWS;
+  _storage = VDMS::StorageType::AWS;
 }
} // namespace VCL
diff --git a/src/vcl/Image.cc b/src/vcl/Image.cc
index f996f6cb..6a95207e 100644
--- a/src/vcl/Image.cc
+++ b/src/vcl/Image.cc
@@ -52,7 +52,6 @@ Image::Read::Read(const std::string &filename, Image::Format format)
     : Operation(format), _fullpath(filename) {}

 void Image::Read::operator()(Image *img) {
-  std::string typestr = img->_storage == Storage::LOCAL ? "LOCAL" : "AWS";

   if (_format == Image::Format::TDB) {
     if (img->_tdb == NULL)
@@ -63,7 +62,7 @@ void Image::Read::operator()(Image *img) {
     img->_height = img->_tdb->get_image_height();
     img->_width = img->_tdb->get_image_width();
     img->_channels = img->_tdb->get_image_channels();
-  } else if (img->_storage == Storage::LOCAL) {
+  } else if (img->_storage == VDMS::StorageType::LOCAL) {
     if (_format == Image::Format::BIN) {
       FILE *bin_file;
       bin_file = fopen(_fullpath.c_str(), "rb");
@@ -82,7 +81,7 @@ void Image::Read::operator()(Image *img) {
       throw VCLException(ObjectEmpty,
                          _fullpath + " could not be read, object is empty");
     }
-  } else //_type == S3
+  } else //_type == AWS|MINIO
   {
     std::vector<unsigned char> data = img->_remote->Read(_fullpath);
     if (!data.empty())
@@ -105,10 +104,10 @@ Image::Write::Write(const std::string &filename, Image::Format format,
 void Image::Write::operator()(Image *img) {
   if (_format == Image::Format::TDB) {
     if (img->_tdb == NULL) {
-      if (img->_storage == Storage::LOCAL) {
+      if (img->_storage == VDMS::StorageType::LOCAL) {
         img->_tdb = new TDBImage(_fullpath);
         img->_tdb->set_compression(img->_compress);
-      } else if (img->_storage == Storage::AWS) {
+      } else if (img->_storage == VDMS::StorageType::AWS) {
         img->_tdb = new TDBImage(_fullpath, *(img->_remote));
       } else {
         throw VCLException(
@@ -118,7 +117,7 @@ void Image::Write::operator()(Image *img) {
     }

     if (img->_tdb->has_data()) {
-      if (img->_storage == Storage::LOCAL) {
+      if (img->_storage == VDMS::StorageType::LOCAL) {
         img->_tdb->set_configuration(img->_remote);
       }
       img->_tdb->write(_fullpath, _metadata);
@@ -143,7 +142,7 @@ void Image::Write::operator()(Image *img) {
       cv_img = img->_cv_img;

     if (!cv_img.empty()) {
-      if (img->_storage == Storage::LOCAL) {
+      if (img->_storage == VDMS::StorageType::LOCAL) {
         cv::imwrite(_fullpath, cv_img);
       } else {
         std::vector<unsigned char> data;
@@ -1140,7 +1139,7 @@ void Image::set_connection(RemoteConnection *remote) {
   }

   _remote = remote;
-  _storage = Storage::AWS;
+  _storage = VDMS::StorageType::AWS;

   if (_tdb != NULL) {
     _tdb->set_configuration(remote);
diff --git a/src/vcl/RemoteConnection.cc b/src/vcl/RemoteConnection.cc
index 8272eb1d..c04b2340 100644
--- a/src/vcl/RemoteConnection.cc
+++ b/src/vcl/RemoteConnection.cc
@@ -32,6 +32,8 @@
 */

 #include "../../include/vcl/RemoteConnection.h"
+#include "../../include/VDMSConfigHelper.h"
+#include "../../src/VDMSConfig.h"
"../../src/VDMSConfig.h" using namespace VCL; @@ -64,25 +66,47 @@ void RemoteConnection::ConfigureAws() { Aws::Client::ClientConfiguration clientConfig; - // TODO: proxy / override settings should be user configurable - // use this block for AWS - // clientConfig.proxyHost = "proxy-dmz.intel.com"; - // clientConfig.proxyPort = 912; - // clientConfig.proxyScheme = Aws::Http::Scheme::HTTP; + std::optional value = std::nullopt; + if (value = VDMS::VDMSConfig::instance()->get_proxy_host()) { + clientConfig.proxyHost = *value; + } + + std::optional port_value = std::nullopt; + if (port_value = VDMS::VDMSConfig::instance()->get_proxy_port()) { + clientConfig.proxyPort = *port_value; + } - // use this override for MinIO - clientConfig.endpointOverride = "http://127.0.0.1:9000"; + if (value = VDMS::VDMSConfig::instance()->get_proxy_scheme()) { + if (*value == "http") { + clientConfig.proxyScheme = Aws::Http::Scheme::HTTP; + } else if (*value == "https") { + clientConfig.proxyScheme = Aws::Http::Scheme::HTTPS; + } else { + std::cerr << "Error: Invalid scheme in the config file" << std::endl; + } + } + + // Use this property to set the endpoint for MinIO when the use_endpoint value + // in the config file is equals to true and the storage type is equals to AWS + // Format: "http://127.0.0.1:9000"; + if ((VDMS::VDMSConfig::instance()->get_storage_type() == + VDMS::StorageType::AWS) && + (VDMS::VDMSConfig::instance()->get_use_endpoint()) && + (VDMS::VDMSConfig::instance()->get_endpoint_override())) { + value = VDMS::VDMSConfig::instance()->get_endpoint_override(); + clientConfig.endpointOverride = *value; + } + + // Set AWS Logging level + if (_aws_sdk_options) { + _aws_sdk_options->loggingOptions.logLevel = + VDMS::VDMSConfig::instance()->get_aws_log_level(); + } _aws_client = new Aws::S3::S3Client(clientConfig); _remote_connected = true; } -// TODO make the log level configurable -// void RemoteConnection::SetLogLevelDebug() { -// //_aws_sdk_options.loggingOptions.logLevel = -// // Aws::Utils::Logging::LogLevel::Debug; -// } - void RemoteConnection::ShutdownAws() { // LogEntry(__FUNCTION__); Aws::ShutdownAPI(*_aws_sdk_options); @@ -159,7 +183,7 @@ void RemoteConnection::Remove_Object(const std::string &path) { } } -//########Private S3 Functions######## +// ########Private S3 Functions######## void RemoteConnection::write_s3(const std::string &filename) { Aws::S3::Model::PutObjectRequest put_request; diff --git a/src/vcl/Video.cc b/src/vcl/Video.cc index 9d3eb788..eb238ef6 100644 --- a/src/vcl/Video.cc +++ b/src/vcl/Video.cc @@ -30,6 +30,8 @@ #include #include +#include "../VDMSConfig.h" +#include "VDMSConfigHelper.h" #include "vcl/Video.h" using namespace VCL; @@ -50,7 +52,8 @@ Video::Video(const std::string &video_id) : Video() { } Video::Video(void *buffer, long size) : Video() { - std::string uname = create_unique("/tmp/tmp/", "vclvideoblob"); + std::string uname = create_unique( + VDMS::VDMSConfig::instance()->get_path_tmp(), "vclvideoblob"); std::ofstream outfile(uname, std::ofstream::binary); _remote = nullptr; @@ -719,7 +722,7 @@ void Video::set_connection(RemoteConnection *remote) { } _remote = remote; - _storage = Storage::AWS; + _storage = VDMS::StorageType::AWS; } /* *********************** */ diff --git a/tests/cleandbs.sh b/tests/cleandbs.sh index b8f1b227..d4c3a0ac 100755 --- a/tests/cleandbs.sh +++ b/tests/cleandbs.sh @@ -2,7 +2,7 @@ rm -r jsongraph qhgraph simpleAdd_db simpleAddx10_db simpleUpdate_db rm -r entitycheck_db datatypecheck_db db_backup test_db_1 rm tests_log.log 
tests_screen.log tests_remote_screen.log tests_remote_log.log tests_udf_screen.log tests_udf_log.log -rm -r tdb +rm -rf tdb/ rm -r db dbs test_db_client rm -r temp rm -r videos_tests @@ -11,4 +11,8 @@ rm test_images/tdb_to_jpg.jpg rm test_images/tdb_to_png.png rm test_images/test_image.jpg rm remote_function_test/tmpfile* -rm -r backups \ No newline at end of file +rm -r backups +echo 'Removing temporary files' +rm -rf ../minio_files +rm -rf ../test_db +rm -rf ../test_db_aws diff --git a/tests/python/TestBoundingBox.py b/tests/python/TestBoundingBox.py index d8c50bb7..57fc3335 100644 --- a/tests/python/TestBoundingBox.py +++ b/tests/python/TestBoundingBox.py @@ -127,6 +127,8 @@ def test_addBoundingBox(self): response, img_array = db.query(all_queries) + self.disconnect(db) + self.assertEqual(len(response), self.number_of_inserts) for i in range(0, self.number_of_inserts): self.assertEqual(response[i]["AddBoundingBox"]["status"], 0) @@ -161,6 +163,8 @@ def test_findBoundingBox(self): response, img_array = db.query(all_queries) + self.disconnect(db) + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) self.assertEqual(response[1]["FindBoundingBox"]["status"], 0) self.assertEqual( @@ -200,6 +204,8 @@ def test_findBoundingBoxCoordinates(self): response, img_array = db.query(all_queries) + self.disconnect(db) + for i in range(0, self.number_of_inserts): self.assertEqual(response[i]["FindBoundingBox"]["status"], 0) self.assertEqual( @@ -258,6 +264,8 @@ def test_addBoundingBoxWithImage(self): response, res_arr = db.query(all_queries, [imgs_arr]) + self.disconnect(db) + self.assertEqual(response[0]["AddImage"]["status"], 0) self.assertEqual(response[1]["AddBoundingBox"]["status"], 0) @@ -294,6 +302,8 @@ def test_findBoundingBoxesInImage(self): response, img_array = db.query(all_queries) + self.disconnect(db) + self.assertEqual(response[0]["FindImage"]["status"], 0) self.assertEqual(response[1]["FindBoundingBox"]["status"], 0) self.assertEqual( @@ -346,6 +356,8 @@ def test_findBoundingBoxByCoordinates(self): response, img_array = db.query(all_queries) + self.disconnect(db) + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) def test_findBoundingBoxBlob(self): @@ -381,6 +393,8 @@ def test_findBoundingBoxBlob(self): response, img_array = db.query(all_queries) + self.disconnect(db) + self.assertEqual(len(img_array), self.number_of_inserts) for i in range(0, self.number_of_inserts): coord = self.number_of_inserts - i - 1 @@ -425,6 +439,8 @@ def test_findBoundingBoxBlobComplex(self): response, img_array = db.query(all_queries) + self.disconnect(db) + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) self.assertTrue(len(img_array) >= self.number_of_inserts) for i in range(0, self.number_of_inserts): @@ -482,6 +498,8 @@ def test_updateBoundingBox(self): response, img_array = db.query(all_queries) + self.disconnect(db) + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) self.assertEqual( response[0]["FindBoundingBox"]["entities"][0]["name"], "updated_bb_0" @@ -519,7 +537,13 @@ def test_updateBoundingBoxCoords(self): response, img_array = db.query(all_queries) + if response[0]["UpdateBoundingBox"]["status"] != 0: + self.disconnect(db) + self.assertEqual(response[0]["UpdateBoundingBox"]["status"], 0) + + if response[0]["UpdateBoundingBox"]["count"] != 1: + self.disconnect(db) self.assertEqual(response[0]["UpdateBoundingBox"]["count"], 1) all_queries = [] @@ -540,6 +564,8 @@ def test_updateBoundingBoxCoords(self): response, img_array = db.query(all_queries) + 
self.disconnect(db) + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) self.assertEqual( response[0]["FindBoundingBox"]["entities"][0]["_coordinates"]["x"], 15 diff --git a/tests/python/TestCommand.py b/tests/python/TestCommand.py index 4127947b..40964916 100644 --- a/tests/python/TestCommand.py +++ b/tests/python/TestCommand.py @@ -32,6 +32,8 @@ class TestCommand(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestCommand, self).__init__(*args, **kwargs) + # Flag for displaying debug messages + self.verbose = False # VDMS Server Info self.hostname = "localhost" @@ -40,9 +42,9 @@ def __init__(self, *args, **kwargs): db_up = False attempts = 0 + db = vdms.vdms() while not db_up: try: - db = vdms.vdms() db.connect(self.hostname, self.port) db.disconnect() db_up = True @@ -75,13 +77,50 @@ def __init__(self, *args, **kwargs): def create_connection(self): db = vdms.vdms() - db.connect(self.hostname, self.port) + if db.is_connected() is False: + db.connect(self.hostname, self.port) + if self.verbose is True: + print( + "Connection created for hostname:", + self.hostname, + "and port:", + str(self.port), + ) + else: + if self.verbose is True: + print( + "Connection is already active for hostname:", + self.hostname, + "and port:", + str(self.port), + ) return db + def disconnect(self, db): + if db is not None: + if db.is_connected() is True: + db.disconnect() + if self.verbose is True: + print( + "Disconnection done for hostname:", + self.hostname, + "and port:", + str(self.port), + ) + else: + if self.verbose is True: + print( + "disconnect() was not executed for hostname:", + self.hostname, + "and port:", + str(self.port), + ) + def addEntity( self, class_name, + db, properties=None, constraints=None, blob=False, # Generic blob @@ -101,8 +140,6 @@ def addEntity( all_queries = [] all_queries.append(query) - db = self.create_connection() - if not blob: response, res_arr = db.query(all_queries) else: diff --git a/tests/python/TestConnections.py b/tests/python/TestConnections.py index 66e5064c..d7a6b466 100644 --- a/tests/python/TestConnections.py +++ b/tests/python/TestConnections.py @@ -38,7 +38,7 @@ def test_FindEntity_link_constraints_float(self): props["age"] = 29 response, arr = self.addEntity( - "felcflo_People", properties=props, check_status=True + "felcflo_People", db=db, properties=props, check_status=True ) props = {} @@ -46,7 +46,7 @@ def test_FindEntity_link_constraints_float(self): props["name"] = "alligator" response, arr = self.addEntity( - "felcflo_foo", properties=props, check_status=True + "felcflo_foo", db=db, properties=props, check_status=True ) props = {} @@ -54,7 +54,7 @@ def test_FindEntity_link_constraints_float(self): props["name"] = "cat" response, arr = self.addEntity( - "felcflo_foo", properties=props, check_status=True + "felcflo_foo", db=db, properties=props, check_status=True ) all_queries = [] @@ -206,6 +206,8 @@ def test_FindEntity_link_constraints_float(self): response, res_arr = db.query(all_queries) self.assertEqual(len(response[1]["FindEntity"]["entities"]), 0) + self.disconnect(db) + def test_FindEntity_link_constraints_string(self): db = self.create_connection() @@ -215,7 +217,7 @@ def test_FindEntity_link_constraints_string(self): props["age"] = 29 response, arr = self.addEntity( - "felcstr_People", properties=props, check_status=True + "felcstr_People", db=db, properties=props, check_status=True ) props = {} @@ -223,7 +225,7 @@ def test_FindEntity_link_constraints_string(self): props["name"] = "alligator" response, arr = 
self.addEntity( - "felcstr_foo", properties=props, check_status=True + "felcstr_foo", db=db, properties=props, check_status=True ) props = {} @@ -231,7 +233,7 @@ def test_FindEntity_link_constraints_string(self): props["name"] = "cat" response, arr = self.addEntity( - "felcstr_foo", properties=props, check_status=True + "felcstr_foo", db=db, properties=props, check_status=True ) all_queries = [] @@ -400,3 +402,5 @@ def test_FindEntity_link_constraints_string(self): response, res_arr = db.query(all_queries) self.assertEqual(len(response[1]["FindEntity"]["entities"]), 1) self.assertEqual(response[1]["FindEntity"]["entities"][0]["name"], "cat") + + self.disconnect(db) diff --git a/tests/python/TestDescriptors.py b/tests/python/TestDescriptors.py index 7326d22a..dcfc3b9a 100644 --- a/tests/python/TestDescriptors.py +++ b/tests/python/TestDescriptors.py @@ -49,6 +49,7 @@ def addSet(self, name, dim, metric, engine): # Check success self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + self.disconnect(db) def test_addSet(self): db = self.create_connection() @@ -68,6 +69,7 @@ def test_addSet(self): # Check success self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + self.disconnect(db) def test_addSetAndDescriptors(self): db = self.create_connection() @@ -112,6 +114,8 @@ def test_addSetAndDescriptors(self): # Check success self.assertEqual(response[0]["AddDescriptor"]["status"], 0) + self.disconnect(db) + def test_addSetAndDescriptorsDimMismatch(self): db = self.create_connection() @@ -180,6 +184,8 @@ def test_addSetAndDescriptorsDimMismatch(self): self.assertEqual(response[0]["status"], -1) self.assertEqual(response[0]["info"], "Blob Dimensions Mismatch") + self.disconnect(db) + def test_addDescriptorsx1000(self): db = self.create_connection() @@ -225,6 +231,7 @@ def test_addDescriptorsx1000(self): # Check success for x in range(0, total - 1): self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + self.disconnect(db) def test_classifyDescriptor(self): db = self.create_connection() @@ -300,3 +307,4 @@ def test_classifyDescriptor(self): self.assertEqual( response[0]["ClassifyDescriptor"]["label"], "class" + str(int(i / 4)) ) + self.disconnect(db) diff --git a/tests/python/TestEngineDescriptors.py b/tests/python/TestEngineDescriptors.py index b26e4b82..1bba0c96 100644 --- a/tests/python/TestEngineDescriptors.py +++ b/tests/python/TestEngineDescriptors.py @@ -49,6 +49,7 @@ def addSet(self, name, dim, metric, engine): # Check success self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + self.disconnect(db) def test_addDifferentSets(self): self.addSet("128-L2-FaissFlat", 128, "L2", "FaissFlat") @@ -115,6 +116,7 @@ def test_addDescriptorsx1000FaissIVFFlat(self): # Check success for x in range(0, total - 1): self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + self.disconnect(db) def test_addDescriptorsx1000TileDBSparse(self): db = self.create_connection() @@ -163,6 +165,7 @@ def test_addDescriptorsx1000TileDBSparse(self): # Check success for x in range(0, total - 1): self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + self.disconnect(db) def test_addDescriptorsx1000TileDBDense(self): db = self.create_connection() @@ -212,3 +215,4 @@ def test_addDescriptorsx1000TileDBDense(self): # Check success for x in range(0, total - 1): self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + self.disconnect(db) diff --git a/tests/python/TestEntities.py b/tests/python/TestEntities.py index 06be0826..dcdcfd99 100644 --- a/tests/python/TestEntities.py 
+++ b/tests/python/TestEntities.py @@ -29,7 +29,7 @@ class TestEntities(TestCommand.TestCommand): - def addSingleEntity(self, thID, results): + def addSingleEntity(self, thID, results, db): props = {} props["name"] = "Luis" props["lastname"] = "Ferro" @@ -37,7 +37,7 @@ def addSingleEntity(self, thID, results): props["threadid"] = thID response, arr = self.addEntity( - "AwesomePeople", properties=props, check_status=False + "AwesomePeople", db=db, properties=props, check_status=False ) try: @@ -47,9 +47,7 @@ def addSingleEntity(self, thID, results): results[thID] = 0 - def findEntity(self, thID, results): - db = self.create_connection() - + def findEntity(self, thID, results, db): constraints = {} constraints["threadid"] = ["==", thID] @@ -85,10 +83,14 @@ def test_runMultipleAdds(self): concurrency = 32 thread_arr = [] results = [None] * concurrency + connections_arr = [] + for i in range(0, concurrency): - thread_add = Thread(target=self.addSingleEntity, args=(i, results)) + db = self.create_connection() + thread_add = Thread(target=self.addSingleEntity, args=(i, results, db)) thread_add.start() thread_arr.append(thread_add) + connections_arr.append(db) idx = 0 error_counter = 0 @@ -100,19 +102,29 @@ def test_runMultipleAdds(self): self.assertEqual(error_counter, 0) + for i in range(0, len(connections_arr)): + self.disconnect(connections_arr[i]) + thread_arr = [] + connections_arr = [] # Tests concurrent AddEntities and FindEntities (that should exists) results = [None] * concurrency * 2 for i in range(0, concurrency): + db1 = self.create_connection() addidx = concurrency + i - thread_add = Thread(target=self.addSingleEntity, args=(addidx, results)) + thread_add = Thread( + target=self.addSingleEntity, args=(addidx, results, db1) + ) thread_add.start() thread_arr.append(thread_add) + connections_arr.append(db1) - thread_find = Thread(target=self.findEntity, args=(i, results)) + db2 = self.create_connection() + thread_find = Thread(target=self.findEntity, args=(i, results, db2)) thread_find.start() thread_arr.append(thread_find) + connections_arr.append(db2) idx = 0 error_counter = 0 @@ -125,10 +137,15 @@ def test_runMultipleAdds(self): self.assertEqual(error_counter, 0) + for i in range(0, len(connections_arr)): + self.disconnect(connections_arr[i]) + def test_addFindEntity(self): results = [None] * 1 - self.addSingleEntity(0, results) - self.findEntity(0, results) + db = self.create_connection() + self.addSingleEntity(0, results, db) + self.findEntity(0, results, db) + db.disconnect() def test_addEntityWithLink(self): db = self.create_connection() @@ -176,6 +193,7 @@ def test_addEntityWithLink(self): self.assertEqual(response[0]["AddEntity"]["status"], 0) self.assertEqual(response[1]["AddEntity"]["status"], 0) + db.disconnect() def test_addfindEntityWrongConstraints(self): db = self.create_connection() @@ -232,6 +250,7 @@ def test_addfindEntityWrongConstraints(self): response[0]["info"], "Constraint for property 'name' must be an array of size 2 or 4", ) + db.disconnect() def test_FindWithSortKey(self): db = self.create_connection() @@ -280,6 +299,7 @@ def test_FindWithSortKey(self): self.assertEqual(response[0]["FindEntity"]["status"], 0) for i in range(0, number_of_inserts): self.assertEqual(response[0]["FindEntity"]["entities"][i]["id"], i) + db.disconnect() def test_FindWithSortBlock(self): db = self.create_connection() @@ -360,3 +380,4 @@ def test_FindWithSortBlock(self): response[0]["FindEntity"]["entities"][i]["id"], number_of_inserts - 1 - i, ) + db.disconnect() diff --git 
a/tests/python/TestEntitiesBlobs.py b/tests/python/TestEntitiesBlobs.py index cbfd7477..bcfe76f1 100644 --- a/tests/python/TestEntitiesBlobs.py +++ b/tests/python/TestEntitiesBlobs.py @@ -56,6 +56,7 @@ def test_addEntityWithBlob(self, thID=0): response, res_arr = db.query(all_queries, [blob_arr]) self.assertEqual(response[0]["AddEntity"]["status"], 0) + self.disconnect(db) def test_addEntityWithBlobNoBlob(self, thID=0): db = self.create_connection() @@ -81,6 +82,7 @@ def test_addEntityWithBlobNoBlob(self, thID=0): self.assertEqual(response[0]["status"], -1) self.assertEqual(response[0]["info"], "Expected blobs: 1. Received blobs: 0") + self.disconnect(db) def test_addEntityWithBlobAndFind(self, thID=0): db = self.create_connection() @@ -136,3 +138,4 @@ def test_addEntityWithBlobAndFind(self, thID=0): self.assertEqual(len(res_arr), len(blob_arr)) self.assertEqual(len(res_arr[0]), len(blob_arr[0])) self.assertEqual((res_arr[0]), (blob_arr[0])) + self.disconnect(db) diff --git a/tests/python/TestFindDescriptorSet.py b/tests/python/TestFindDescriptorSet.py new file mode 100644 index 00000000..ea6df170 --- /dev/null +++ b/tests/python/TestFindDescriptorSet.py @@ -0,0 +1,55 @@ +import TestCommand + + +class TestFindDescriptorSet(TestCommand.TestCommand): + def addSet(self, name, dim, metric, engine): + db = self.create_connection() + + all_queries = [] + descriptor_set = {} + descriptor_set["name"] = name + descriptor_set["dimensions"] = dim + descriptor_set["metric"] = metric + descriptor_set["engine"] = engine + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) + + # Execute the query + response, img_array = db.query(all_queries) + + # Check if the query was successful (you can add your own checks here) + if "AddDescriptorSet" in response[0]: + status = response[0]["AddDescriptorSet"].get("status") + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + def test_findDescriptorSet(self): + db = self.create_connection() + name = "testFindDescriptorSet-new" + dim = 128 + engine = "FaissFlat" + metric = "L2" + + self.addSet(name, dim, metric, engine) + + all_queries = [] + + storeIndex = True + + descriptor_set = {} + descriptor_set["set"] = name + descriptor_set["storeIndex"] = storeIndex + + query = {} + + query["FindDescriptorSet"] = descriptor_set + + all_queries.append(query) + + # Execute the query + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindDescriptorSet"]["status"], 0) + self.assertEqual(response[0]["FindDescriptorSet"]["returned"], 1) diff --git a/tests/python/TestFindDescriptors.py b/tests/python/TestFindDescriptors.py index ba3d0c8f..794b44fc 100644 --- a/tests/python/TestFindDescriptors.py +++ b/tests/python/TestFindDescriptors.py @@ -82,6 +82,8 @@ def create_set_and_insert(self, set_name, dims, total, labels=True): for x in range(0, total): self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + self.disconnect(db) + # @unittest.skip("Skipping class until fixed") def test_findDescByConstraints(self): # Add Set @@ -119,6 +121,7 @@ def test_findDescByConstraints(self): self.assertEqual(response[0]["FindDescriptor"]["status"], 0) self.assertEqual(response[0]["FindDescriptor"]["returned"], 1) self.assertEqual(response[0]["FindDescriptor"]["entities"][0]["myid"], 202) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescUnusedRef(self): @@ -155,6 +158,7 @@ def test_findDescUnusedRef(self): self.assertEqual(response[0]["FindDescriptor"]["status"], 0) 
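        # (FindDescriptor responses carry a "status" code, a "returned" count,
        # and an "entities" array; the assertions around this point inspect
        # exactly those fields.)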
self.assertEqual(response[0]["FindDescriptor"]["returned"], 1) self.assertEqual(response[0]["FindDescriptor"]["entities"][0]["myid"], 202) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByConst_get_id(self): @@ -191,6 +195,7 @@ def test_findDescByConst_get_id(self): self.assertEqual(response[0]["FindDescriptor"]["status"], 0) self.assertEqual(response[0]["FindDescriptor"]["returned"], 1) self.assertEqual(response[0]["FindDescriptor"]["entities"][0]["myid"], 202) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByConst_blobTrue(self): @@ -230,6 +235,7 @@ def test_findDescByConst_blobTrue(self): self.assertEqual(response[0]["FindDescriptor"]["entities"][0]["myid"], 202) self.assertEqual(len(fv_array), 1) self.assertEqual(len(fv_array[0]), dims * 4) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByConst_multiple_blobTrue(self): @@ -270,6 +276,7 @@ def test_findDescByConst_multiple_blobTrue(self): self.assertEqual(response[0]["FindDescriptor"]["entities"][1]["myid"], 201) self.assertEqual(len(fv_array), 3) self.assertEqual(len(fv_array[0]), dims * 4) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByBlob(self): @@ -317,6 +324,7 @@ def test_findDescByBlob(self): self.assertEqual(response[0]["FindDescriptor"]["entities"][0]["_distance"], 0) self.assertEqual(response[0]["FindDescriptor"]["entities"][1]["_distance"], 400) self.assertEqual(response[0]["FindDescriptor"]["entities"][2]["_distance"], 400) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByBlobNoLabels(self): @@ -361,6 +369,7 @@ def test_findDescByBlobNoLabels(self): # Check success self.assertEqual(response[0]["FindDescriptor"]["status"], 0) self.assertEqual(response[0]["FindDescriptor"]["returned"], kn) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByBlobNoResults(self): @@ -403,6 +412,7 @@ def test_findDescByBlobNoResults(self): self.assertEqual(response[0]["FindDescriptor"]["returned"], 0) # self.assertEqual(len(blob_array), kn) # self.assertEqual(descriptor_blob[0], blob_array[0]) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByBlobUnusedRef(self): @@ -446,6 +456,7 @@ def test_findDescByBlobUnusedRef(self): self.assertEqual(response[0]["FindDescriptor"]["returned"], kn) self.assertEqual(len(blob_array), kn) self.assertEqual(descriptor_blob[0], blob_array[0]) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByBlobAndConstraints(self): @@ -496,6 +507,7 @@ def test_findDescByBlobAndConstraints(self): self.assertEqual(response[0]["FindDescriptor"]["returned"], 1) self.assertEqual(response[0]["FindDescriptor"]["entities"][0]["_distance"], 0) + self.disconnect(db) # @unittest.skip("Skipping class until fixed") def test_findDescByBlobWithLink(self): @@ -636,3 +648,4 @@ def test_findDescByBlobWithLink(self): self.assertEqual(response[1]["FindEntity"]["entities"][0]["entity_prop"], 200) self.assertEqual(response[1]["FindEntity"]["entities"][1]["entity_prop"], 201) self.assertEqual(response[1]["FindEntity"]["entities"][2]["entity_prop"], 202) + self.disconnect(db) diff --git a/tests/python/TestImages.py b/tests/python/TestImages.py index b4c4ec6e..2e48002a 100644 --- a/tests/python/TestImages.py +++ b/tests/python/TestImages.py @@ -97,6 +97,7 @@ def test_addImage(self): self.assertEqual(len(response), number_of_inserts) for i in range(0, 
number_of_inserts): self.assertEqual(response[i]["AddImage"]["status"], 0) + self.disconnect(db) def test_findEntityImage(self): db = self.create_connection() @@ -137,6 +138,7 @@ def test_findEntityImage(self): self.assertEqual( response[1]["FindEntity"]["entities"][0]["name"], prefix_name + "1" ) + self.disconnect(db) def test_findImage(self): db = self.create_connection() @@ -167,6 +169,7 @@ def test_findImage(self): self.assertEqual(response[0]["FindImage"]["status"], 0) self.assertEqual(response[1]["FindImage"]["status"], 0) self.assertEqual(len(img_array), 2) + self.disconnect(db) def test_findImageResults(self): db = self.create_connection() @@ -207,6 +210,7 @@ def test_findImageResults(self): response[1]["FindImage"]["entities"][0]["name"], prefix_name + "1" ) self.assertEqual(len(img_array), 2) + self.disconnect(db) def test_addImageWithLink(self): db = self.create_connection() @@ -260,6 +264,7 @@ def test_addImageWithLink(self): self.assertEqual(response[0]["AddEntity"]["status"], 0) self.assertEqual(response[1]["AddImage"]["status"], 0) + self.disconnect(db) def test_findImage_multiple_results(self): db = self.create_connection() @@ -292,6 +297,7 @@ def test_findImage_multiple_results(self): self.assertEqual(len(img_array), number_of_inserts) self.assertEqual(response[0]["FindImage"]["status"], 0) self.assertEqual(response[0]["FindImage"]["returned"], number_of_inserts) + self.disconnect(db) def test_findImageNoBlob(self): db = self.create_connection() @@ -327,6 +333,7 @@ def test_findImageNoBlob(self): self.assertEqual(response[0]["FindImage"]["status"], 0) self.assertEqual(response[1]["FindImage"]["status"], 0) self.assertEqual(len(img_array), 0) + self.disconnect(db) def test_findImageRefNoBlobNoPropsResults(self): db = self.create_connection() @@ -364,6 +371,7 @@ def test_findImageRefNoBlobNoPropsResults(self): self.assertEqual(response[0]["FindImage"]["status"], 0) self.assertEqual(len(img_array), 0) + self.disconnect(db) def test_updateImage(self): db = self.create_connection() @@ -396,6 +404,7 @@ def test_updateImage(self): self.assertEqual(response[0]["UpdateImage"]["count"], 1) self.assertEqual(len(img_array), 0) + self.disconnect(db) def ztest_zFindImageWithCollection(self): db = self.create_connection() @@ -431,3 +440,4 @@ def ztest_zFindImageWithCollection(self): self.assertEqual(response[0]["FindImage"]["status"], 0) self.assertEqual(len(img_array), number_of_inserts) + self.disconnect(db) diff --git a/tests/python/TestRetail.py b/tests/python/TestRetail.py index f4872990..6dc50474 100644 --- a/tests/python/TestRetail.py +++ b/tests/python/TestRetail.py @@ -54,6 +54,7 @@ def add_descriptor_set(self, name, dim): # Check success self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + self.disconnect(db) def build_store(self): db = self.create_connection() @@ -159,6 +160,7 @@ def build_store(self): self.assertEqual(response[(i - 1) * 4 + 2]["AddEntity"]["status"], 0) self.assertEqual(response[(i - 1) * 4 + 3]["AddConnection"]["status"], 0) self.assertEqual(response[(i - 1) * 4 + 4]["AddConnection"]["status"], 0) + self.disconnect(db) def single(self, thID, db, results): # id = "19149ec8-fa0d-4ed0-9cfb-3e0811b75391" @@ -225,3 +227,4 @@ def test_concurrent(self): idx += 1 self.assertEqual(error_counter, 0) + self.disconnect(db) diff --git a/tests/python/TestTestCommand.py b/tests/python/TestTestCommand.py new file mode 100644 index 00000000..d57fa1fa --- /dev/null +++ b/tests/python/TestTestCommand.py @@ -0,0 +1,46 @@ +# +# The MIT License +# +# @copyright 
Copyright (c) 2023 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# + +# The following tests test the TestCommand class found in the TestCommand.py +# file. The TestCommand is a base class used by some derived classes +# found in the tests/python directory +import TestCommand +import unittest + + +class TestTestCommand(unittest.TestCase): + def test_create_connection(self): + tc = TestCommand.TestCommand() + db = tc.create_connection() + self.assertTrue(db.is_connected()) + db.disconnect() + + def test_disconnect(self): + tc = TestCommand.TestCommand() + db = tc.create_connection() + self.assertTrue(db.is_connected()) + db.disconnect() + self.assertFalse(db.is_connected()) diff --git a/tests/python/TestVDMSClient.py b/tests/python/TestVDMSClient.py new file mode 100644 index 00000000..d0bfaf5f --- /dev/null +++ b/tests/python/TestVDMSClient.py @@ -0,0 +1,212 @@ +# +# The MIT License +# +# @copyright Copyright (c) 2023 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
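+# (These tests pick whichever configured server port is reachable: the setup
+# below first tries port 55565 and falls back to the AWS/MinIO port 55564, so
+# the suite can run under either run_python_tests.sh or
+# run_python_aws_tests.sh.)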
+# + +# The following tests test the vdms class found in the vdms.py file +import vdms +import unittest +from io import StringIO +from unittest.mock import patch + + +class TestVDMSClient(unittest.TestCase): + port = 55565 + aws_port = 55564 + hostname = "localhost" + assigned_port = port + + def setUp(self): + # Get the port used to connect to the server + db = self.create_db_connection() + if db is not None: + db.disconnect() + + def create_db_connection(self): + db = vdms.vdms() + connected = False + try: + connected = db.connect(self.hostname, self.port) + self.assigned_port = self.port + except Exception as e: + if e.strerror == "Connection refused": + try: + # Try to connect to the AWS/MinIO port used by the config files + if connected is False: + aws_connected = db.connect(self.hostname, self.aws_port) + if aws_connected is False: + print("create_db_connection() failed") + self.assigned_port = None + return None + else: + self.assigned_port = self.aws_port + connected = True + except Exception as e: + print("create_db_connection() second attempt failed with exception") + else: + print("create_db_connection() first attempt failed with exception") + + return db + + def test_vdms_existing_connection(self): + # Initialize + # VDMS Server Info + db = vdms.vdms() + + # Execute the test + connected = db.connect(self.hostname, self.assigned_port) + + # Try to connect when it is already connected + connected_again = db.connect(self.hostname, self.assigned_port) + + # Check results + self.assertTrue(connected) + self.assertFalse(connected_again) + + # Cleanup + disconnected = db.disconnect() + self.assertTrue(disconnected) + + def test_vdms_non_existing_connection(self): + # Initialize + db = vdms.vdms() + + # Execute the test + disconnected = db.disconnect() + + # Check results + self.assertFalse(disconnected) + + def test_vdms_non_json_query(self): + # Initialize + # VDMS Server Info + db = vdms.vdms() + query = "Non JSON value" + expected_info = "Error parsing the query, ill formed JSON" + expected_status = -1 + expected_command = "Transaction" + + # Execute the test + connected = db.connect(self.hostname, self.assigned_port) + result = db.query(query) + + # Check results + self.assertEqual(expected_command, result[0][0]["FailedCommand"]) + self.assertEqual(expected_info, result[0][0]["info"]) + self.assertEqual(expected_status, result[0][0]["status"]) + self.assertTrue(connected) + + # Cleanup + disconnected = db.disconnect() + self.assertTrue(disconnected) + + def test_vdms_query_disconnected(self): + # Initialize + db = vdms.vdms() + query = "{'test': 'test'}" + expected_result = "NOT CONNECTED" + + # Execute the test + result = db.query(query) + self.assertEqual(result, expected_result) + + def test_vdms_get_last_response(self): + # Initialize + # VDMS Server Info + db = vdms.vdms() + query = "Non JSON value" + expected_info = "Error parsing the query, ill formed JSON" + expected_status = -1 + expected_command = "Transaction" + + # Execute the test + connected = db.connect(self.hostname, self.assigned_port) + result = db.query(query) + last_response = db.get_last_response() + + # Check results + self.assertEqual(expected_command, result[0][0]["FailedCommand"]) + self.assertEqual(expected_info, result[0][0]["info"]) + self.assertEqual(expected_status, result[0][0]["status"]) + self.assertEqual(expected_command, last_response[0]["FailedCommand"]) + self.assertEqual(expected_info, last_response[0]["info"]) + self.assertEqual(expected_status, last_response[0]["status"]) + 
self.assertTrue(connected) + + # Cleanup + disconnected = db.disconnect() + self.assertTrue(disconnected) + + def test_vdms_get_last_response_str(self): + # Initialize + # VDMS Server Info + db = vdms.vdms() + query = "Non JSON value" + expected_info = "Error parsing the query, ill formed JSON" + expected_status = -1 + expected_command = "Transaction" + expected_response = '[\n {\n "FailedCommand": "Transaction",\n "info": "Error parsing the query, ill formed JSON",\n "status": -1\n }\n]' + + # Execute the test + connected = db.connect(self.hostname, self.assigned_port) + result = db.query(query) + last_response_str = db.get_last_response_str() + + # Check results + self.assertEqual(expected_command, result[0][0]["FailedCommand"]) + self.assertEqual(expected_info, result[0][0]["info"]) + self.assertEqual(expected_status, result[0][0]["status"]) + self.assertEqual(expected_response, last_response_str) + self.assertTrue(connected) + + # Cleanup + disconnected = db.disconnect() + self.assertTrue(disconnected) + + def test_vdms_print_last_response(self): + # Initialize + # VDMS Server Info + db = vdms.vdms() + query = "Non JSON value" + expected_info = "Error parsing the query, ill formed JSON" + expected_status = -1 + expected_command = "Transaction" + expected_output = '[\n {\n "FailedCommand": "Transaction",\n "info": "Error parsing the query, ill formed JSON",\n "status": -1\n }\n]' + + # Execute the test + connected = db.connect(self.hostname, self.assigned_port) + result = db.query(query) + with patch("sys.stdout", new=StringIO()) as fake_out: + db.print_last_response() + fake_output = fake_out.getvalue() + + # Check results + self.assertEqual(fake_output.splitlines(), expected_output.splitlines()) + self.assertEqual(expected_command, result[0][0]["FailedCommand"]) + self.assertEqual(expected_info, result[0][0]["info"]) + self.assertEqual(expected_status, result[0][0]["status"]) + self.assertTrue(connected) + + # Cleanup + db.disconnect() diff --git a/tests/python/TestVideos.py b/tests/python/TestVideos.py index 06365e05..9d5824ce 100644 --- a/tests/python/TestVideos.py +++ b/tests/python/TestVideos.py @@ -94,6 +94,7 @@ def test_addVideo(self): self.assertEqual(len(response), number_of_inserts) for i in range(0, number_of_inserts): self.assertEqual(response[i]["AddVideo"]["status"], 0) + self.disconnect(db) def test_addVideoFromLocalFile_invalid_command(self): # The test is meant to fail if both blob and a local file are specified @@ -111,6 +112,7 @@ def test_addVideoFromLocalFile_invalid_command(self): response, obj_array = db.query([query], [[video_blob]]) self.assertEqual(response[0]["status"], -1) + self.disconnect(db) def test_addVideoFromLocalFile_file_not_found(self): db = self.create_connection() @@ -124,6 +126,7 @@ def test_addVideoFromLocalFile_file_not_found(self): response, obj_array = db.query([query], [[]]) self.assertEqual(response[0]["status"], -1) + self.disconnect(db) @unittest.skip("Skipping class until fixed") def test_addVideoFromLocalFile_success(self): @@ -138,6 +141,7 @@ def test_addVideoFromLocalFile_success(self): response, obj_array = db.query([query], [[]]) self.assertEqual(response[0]["AddVideo"]["status"], 0) + self.disconnect(db) def test_extractKeyFrames(self): db = self.create_connection() @@ -176,6 +180,7 @@ def test_extractKeyFrames(self): # we know that this video has exactly four key frames self.assertEqual(response[0]["FindEntity"]["count"], 4) + self.disconnect(db) def test_findVideo(self): db = self.create_connection() @@ -209,6 +214,7 @@ def 
test_findVideo(self): self.assertEqual(len(vid_array), number_of_inserts) for i in range(0, number_of_inserts): self.assertEqual(response[i]["FindVideo"]["status"], 0) + self.disconnect(db) def test_FindFramesByFrames(self): db = self.create_connection() @@ -242,6 +248,7 @@ def test_FindFramesByFrames(self): self.assertEqual(response[0]["FindFrames"]["status"], 0) self.assertEqual(response[1]["FindFrames"]["status"], 0) self.assertEqual(len(img_array), 2 * len(video_params["frames"])) + self.disconnect(db) def test_FindFramesByInterval(self): db = self.create_connection() @@ -284,6 +291,7 @@ def test_FindFramesByInterval(self): self.assertEqual(response[0]["FindFrames"]["status"], 0) self.assertEqual(response[1]["FindFrames"]["status"], 0) self.assertEqual(len(img_array), 2 * number_of_frames) + self.disconnect(db) def test_FindFramesMissingParameters(self): db = self.create_connection() @@ -304,6 +312,7 @@ def test_FindFramesMissingParameters(self): self.assertEqual(response[0]["status"], -1) self.assertEqual(img, []) + self.disconnect(db) def test_FindFramesInvalidParameters(self): db = self.create_connection() @@ -334,6 +343,7 @@ def test_FindFramesInvalidParameters(self): self.assertEqual(response[0]["status"], -1) self.assertEqual(img, []) + self.disconnect(db) def test_findVideoResults(self): db = self.create_connection() @@ -371,6 +381,7 @@ def test_findVideoResults(self): self.assertEqual(len(vid_array), number_of_inserts) for i in range(0, number_of_inserts): self.assertEqual(response[i]["FindVideo"]["status"], 0) + self.disconnect(db) def test_addVideoWithLink(self): db = self.create_connection() @@ -423,6 +434,7 @@ def test_addVideoWithLink(self): self.assertEqual(response[0]["AddEntity"]["status"], 0) self.assertEqual(response[1]["AddVideo"]["status"], 0) + self.disconnect(db) def test_findVid_multiple_results(self): db = self.create_connection() @@ -455,6 +467,7 @@ def test_findVid_multiple_results(self): self.assertEqual(len(vid_arr), number_of_inserts) self.assertEqual(response[0]["FindVideo"]["status"], 0) self.assertEqual(response[0]["FindVideo"]["returned"], number_of_inserts) + self.disconnect(db) def test_findVideoNoBlob(self): db = self.create_connection() @@ -490,6 +503,7 @@ def test_findVideoNoBlob(self): self.assertEqual(response[0]["FindVideo"]["status"], 0) self.assertEqual(response[1]["FindVideo"]["status"], 0) self.assertEqual(len(img_array), 0) + self.disconnect(db) def test_updateVideo(self): db = self.create_connection() @@ -522,3 +536,4 @@ def test_updateVideo(self): self.assertEqual(response[0]["UpdateVideo"]["count"], 1) self.assertEqual(len(img_array), 0) + self.disconnect(db) diff --git a/tests/python/config-aws-tests.json b/tests/python/config-aws-tests.json index a623bdcb..60c41aac 100644 --- a/tests/python/config-aws-tests.json +++ b/tests/python/config-aws-tests.json @@ -7,5 +7,7 @@ "db_root_path": "test_db_aws", "storage_type": "aws", //local, aws, etc "bucket_name": "minio-bucket", - "more-info": "github.com/IntelLabs/vdms" + "more-info": "github.com/IntelLabs/vdms", + "aws_log_level": "debug", + "use_endpoint": true } diff --git a/tests/python/run_python_aws_tests.sh b/tests/python/run_python_aws_tests.sh index e50c9d7a..9a88cf73 100755 --- a/tests/python/run_python_aws_tests.sh +++ b/tests/python/run_python_aws_tests.sh @@ -25,31 +25,115 @@ # THE SOFTWARE. 
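 # (For reference, the keys added to config-aws-tests.json above map onto the
 # PARAM_* constants introduced in VDMSConfig.h; a fuller hand-written sketch
 # of an AWS/MinIO config section, with illustrative values, would be:
 #   "storage_type": "aws",
 #   "bucket_name": "minio-bucket",
 #   "use_endpoint": true,
 #   "endpoint_override": "http://127.0.0.1:9000",
 #   "aws_log_level": "debug",
 #   "proxy_host": "proxy.example.com",
 #   "proxy_port": "912",
 #   "proxy_scheme": "http"
 # proxy.example.com and port 912 are placeholders, and the proxy keys and
 # endpoint_override are optional and may be omitted.)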
 #

-TEST_DIR=${PWD}
-base_dir=$(dirname $(dirname $PWD))
-client_path=${base_dir}/client/python
-export PYTHONPATH=$client_path:${PYTHONPATH}
+# Command format:
+# sh ./run_python_aws_tests.sh -u YOUR_MINIO_USERNAME -p YOUR_MINIO_PASSWORD

-# Uncomment to re-generate queryMessage_pb2.py
-# protoc -I=${base_dir}/utils/src/protobuf --python_out=${client_path}/vdms ${base_dir}/utils/src/protobuf/queryMessage.proto
+# Variable used for storing the process id for the vdms server
+py_unittest_pid='UNKNOWN_PROCESS_ID'
+# Variable used for storing the process id for the minio server
+py_minio_pid='UNKNOWN_PROCESS_ID'

-cd ${TEST_DIR}
-rm -rf test_db log.log screen.log
-mkdir -p test_db
+function execute_commands() {
+    username_was_set=false
+    password_was_set=false
+    # Parse the arguments of the command
+    while getopts u:p: flag
+    do
+        case "${flag}" in
+        u)
+            username=${OPTARG}
+            username_was_set=true
+            ;;
+        p)
+            password=${OPTARG}
+            password_was_set=true
+            ;;
+        esac
+    done

-./../../build/vdms -cfg config-aws-tests.json > screen.log 2> log.log &
-py_unittest_pid=$!
+    if [ $username_was_set = false ] || [ $password_was_set = false ]; then
+        echo 'Missing arguments for "run_python_aws_tests.sh" script'
+        echo 'Usage: sh ./run_python_aws_tests.sh -u YOUR_MINIO_USERNAME -p YOUR_MINIO_PASSWORD'
+        exit 1;
+    fi

-sleep 1
+    TEST_DIR=${PWD}
+    base_dir=$(dirname $(dirname $PWD))
+    client_path=${base_dir}/client/python
+    export PYTHONPATH=$client_path:${PYTHONPATH}

-#start the minio server
-./../../minio server ./../../minio_files &
-py_minio_pid=$!
+    # Uncomment to re-generate queryMessage_pb2.py
+    # protoc -I=${base_dir}/utils/src/protobuf --python_out=${client_path}/vdms ${base_dir}/utils/src/protobuf/queryMessage.proto

-sleep 2
+    cd ${TEST_DIR}

-echo 'Running Python AWS S3 tests...'
-python3 -m coverage run --include="../../*" --omit="${base_dir}/client/python/vdms/queryMessage_pb2.py,../*" -m unittest discover --pattern=Test*.py -v
+    # Kill current instances of minio
+    echo 'Killing current instances of minio'
+    pkill -9 minio || true
+    sleep 2

-rm -rf test_db log.log screen.log
-kill -9 $py_unittest_pid $py_minio_pid || true
\ No newline at end of file
+    echo 'Removing temporary files'
+    rm -rf ../../minio_files/ || true
+    rm -rf test_db/ || true
+    rm -rf test_db_aws/ || true
+
+    rm -rf test_db log.log screen.log
+    mkdir -p test_db
+
+    echo 'Starting vdms server'
+    ./../../build/vdms -cfg config-aws-tests.json > screen.log 2> log.log &
+    py_unittest_pid=$!
+
+    sleep 1
+
+    # start the minio server
+    echo 'Starting minio server'
+    ./../../minio server ./../../minio_files &
+    py_minio_pid=$!
+
+    sleep 2
+    echo 'Creating buckets for the tests'
+    # Create the minio-bucket on the MinIO server, using the MinIO client,
+    # which connects to the server with the given username and password
+    mc alias set myminio/ http://localhost:9000 $username $password
+    mc mb myminio/minio-bucket
+
+    sleep 2
+
+    # Starting the testing
+    echo 'Starting the testing'
+    echo 'Running Python AWS S3 tests...'
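+    # unittest discover picks up every file matching Test*.py here; a single
+    # suite can be run with the same flags by narrowing the pattern, e.g.:
+    #   python3 -m unittest discover --pattern=TestVDMSClient.py -v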
+    python3 -m coverage run --include="../../*" --omit="${base_dir}/client/python/vdms/queryMessage_pb2.py,../*" -m unittest discover --pattern=Test*.py -v
+    echo 'Finished'
+    exit 0
+}
+
+# Cleanup function to kill the processes which were started by the script.
+# It also deletes the directories created by the script (or its tests).
+function cleanup() {
+    # Removing log files
+    echo 'Removing log files'
+    rm -rf test_db log.log screen.log
+
+    echo 'Removing temporary files'
+    rm -rf ../../minio_files/ || true
+    rm -rf test_db/ || true
+    rm -rf test_db_aws/ || true
+
+    # Killing vdms and minio processes after finishing the testing
+    echo 'Killing vdms and minio processes after finishing the testing'
+    kill -9 $py_unittest_pid $py_minio_pid || true
+    exit 0
+}
+
+# Get the arguments sent to the script
+args=$@
+
+# These traps call the cleanup() function when one of those signals happens
+trap cleanup EXIT
+trap cleanup ERR
+trap cleanup SIGINT
+
+# Execute the script commands
+execute_commands ${args}
diff --git a/tests/python/run_python_tests.sh b/tests/python/run_python_tests.sh
index 144525d3..84649bc0 100755
--- a/tests/python/run_python_tests.sh
+++ b/tests/python/run_python_tests.sh
@@ -25,25 +25,49 @@
 # THE SOFTWARE.
 #

-TEST_DIR=${PWD}
-base_dir=$(dirname $(dirname $PWD))
-client_path=${base_dir}/client/python
-export PYTHONPATH=$client_path:${PYTHONPATH}
+# Variable used for storing the process id for the vdms server
+py_unittest_pid='UNKNOWN_PROCESS_ID'

-# Uncomment to re-generate queryMessage_pb2.py
-# protoc -I=${base_dir}/utils/src/protobuf --python_out=${client_path}/vdms ${base_dir}/utils/src/protobuf/queryMessage.proto
+function execute_commands() {
+    TEST_DIR=${PWD}
+    base_dir=$(dirname $(dirname $PWD))
+    client_path=${base_dir}/client/python
+    export PYTHONPATH=$client_path:${PYTHONPATH}

-cd ${TEST_DIR}
-rm -rf test_db log.log screen.log
-mkdir -p test_db
+    # Uncomment to re-generate queryMessage_pb2.py
+    # protoc -I=${base_dir}/utils/src/protobuf --python_out=${client_path}/vdms ${base_dir}/utils/src/protobuf/queryMessage.proto

-./../../build/vdms -cfg config-tests.json > screen.log 2> log.log &
-py_unittest_pid=$!
+    cd ${TEST_DIR}
+    rm -rf test_db log.log screen.log
+    mkdir -p test_db

-sleep 1
+    ./../../build/vdms -cfg config-tests.json > screen.log 2> log.log &
+    py_unittest_pid=$!

-echo 'Running Python tests...'
-python3 -m coverage run --include="../../*" --omit="${base_dir}/client/python/vdms/queryMessage_pb2.py,../*" -m unittest discover --pattern=Test*.py -v
+    sleep 1
+
+    echo 'Running Python tests...'
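+    # coverage writes its measurements to a .coverage file next to this
+    # script; after the run, a line-by-line summary can be read back with:
+    #   python3 -m coverage report -m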
+    python3 -m coverage run --include="../../*" --omit="${base_dir}/client/python/vdms/queryMessage_pb2.py,../*" -m unittest discover --pattern=Test*.py -v
+
+    echo 'Finished'
+    exit 0
+}
+
+# Cleanup function to kill the processes which were started by the script.
+# It also deletes the directories created by the script (or its tests).
+function cleanup() {
+    rm -rf test_db log.log screen.log
+    kill -9 $py_unittest_pid || true
+    exit 0
+}
+
+# Get the arguments sent to the script
+args=$@
+
+# These traps call the cleanup() function when one of those signals happens
+trap cleanup EXIT
+trap cleanup ERR
+trap cleanup SIGINT
+
+# Execute the script commands
+execute_commands ${args}
diff --git a/tests/run_aws_tests.sh b/tests/run_aws_tests.sh
index 9546a022..cd9c6680 100755
--- a/tests/run_aws_tests.sh
+++ b/tests/run_aws_tests.sh
@@ -1,19 +1,88 @@
 #!/bin/bash -e
-sh cleandbs.sh || true
-mkdir test_db_client
-mkdir dbs # necessary for Descriptors
-mkdir temp # necessary for Videos
-mkdir videos_tests
-mkdir backups
+# Command format:
+# sh ./run_aws_tests.sh -u YOUR_MINIO_USERNAME -p YOUR_MINIO_PASSWORD

-#start the minio server
-./../minio server ./../minio_files &
-py_minio_pid=$!
+# Variable used for storing the process id for the minio server
+py_minio_pid='UNKNOWN_PROCESS_ID'

-sleep 2
+function execute_commands() {
+    username_was_set=false
+    password_was_set=false

-echo 'Running C++ tests...'
-./../build/tests/unit_tests --gtest_filter=RemoteConnectionTest.*
+    # Parse the arguments of the command
+    while getopts u:p: flag
+    do
+        case "${flag}" in
+        u)
+            username=${OPTARG}
+            username_was_set=true
+            ;;
+        p)
+            password=${OPTARG}
+            password_was_set=true
+            ;;
+        esac
+    done

-kill -9 $py_minio_pid || true
+    if [ $username_was_set = false ] || [ $password_was_set = false ]; then
+        echo 'Missing arguments for "run_aws_tests.sh" script'
+        echo 'Usage: sh ./run_aws_tests.sh -u YOUR_MINIO_USERNAME -p YOUR_MINIO_PASSWORD'
+        exit 1;
+    fi
+
+    # Kill current instances of minio
+    echo 'Killing current instances of minio'
+    pkill -9 minio || true
+    sleep 2
+
+    sh cleandbs.sh || true
+    mkdir test_db_client
+    mkdir dbs # necessary for Descriptors
+    mkdir temp # necessary for Videos
+    mkdir videos_tests
+    mkdir backups
+
+    # start the minio server
+    ./../minio server ./../minio_files &
+    py_minio_pid=$!
+
+    sleep 2
+    echo 'Creating buckets for the tests'
+    # Create the minio-bucket on the MinIO server, using the MinIO client,
+    # which connects to the server with the given username and password
+    mc alias set myminio/ http://localhost:9000 $username $password
+    mc mb myminio/minio-bucket
+
+    echo 'Running C++ tests...'
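+    # --gtest_filter takes colon-separated patterns, so this run can be
+    # narrowed further; a sketch with a hypothetical test name:
+    #   ./../build/tests/unit_tests --gtest_filter=RemoteConnectionTest.RemoteWriteTest
+    # Here every RemoteConnectionTest case is wanted: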
+    ./../build/tests/unit_tests --gtest_filter=RemoteConnectionTest.*
+
+    echo 'Finished'
+    exit 0
+}
+
+# Cleanup function to kill the processes which were started by the script.
+# It also deletes the directories created by the script (or its tests).
+function cleanup() {
+    echo "Killing the minio server"
+    kill -9 $py_minio_pid || true
+
+    echo 'Removing temporary files'
+    rm -rf ../minio_files/ || true
+    rm -rf test_db/ || true
+    rm -rf test_db_aws/ || true
+    rm -rf tdb/ || true
+    exit 0
+}
+
+# Get the arguments sent to the script
+args=$@
+
+# These traps call the cleanup() function when one of those signals happens
+trap cleanup EXIT
+trap cleanup ERR
+trap cleanup SIGINT
+
+# Execute the script commands
+execute_commands ${args}
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 5520b073..b31ba01f 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -1,42 +1,73 @@
 #!/bin/bash -e
-sh cleandbs.sh || true
-mkdir test_db_client
-mkdir dbs # necessary for Descriptors
-mkdir temp # necessary for Videos
-mkdir videos_tests
-mkdir backups
+# Variables used for storing the process ids for the vdms server and client
+cpp_unittest_pid='UNKNOWN_PROCESS_ID'
+client_test_pid='UNKNOWN_PROCESS_ID'

-# Stop UDF Queue and Remote Server if already running
-pkill -9 -f udf_server.py || true
-pkill -9 -f udf_local.py || true
+function execute_commands() {
+    sh cleandbs.sh || true
+    mkdir test_db_client
+    mkdir dbs # necessary for Descriptors
+    mkdir temp # necessary for Videos
+    mkdir videos_tests
+    mkdir backups

-# Start remote server for test
-cd remote_function_test
-python3 -m pip install -r requirements.txt
-python3 udf_server.py 5010 > ../tests_remote_screen.log 2> ../tests_remote_log.log &
+    # Stop UDF Queue and Remote Server if already running
+    pkill -9 -f udf_server.py || true
+    pkill -9 -f udf_local.py || true

-# Start UDF message queue for test
-cd ../udf_test
-python3 -m pip install -r requirements.txt
-python3 udf_local.py > ../tests_udf_screen.log 2> ../tests_udf_log.log &
+    # Start remote server for test
+    cd remote_function_test
+    python3 -m pip install -r requirements.txt
+    python3 udf_server.py 5010 > ../tests_remote_screen.log 2> ../tests_remote_log.log &

-cd ..
+    # Start UDF message queue for test
+    cd ../udf_test
+    python3 -m pip install -r requirements.txt
+    python3 udf_local.py > ../tests_udf_screen.log 2> ../tests_udf_log.log &

-# Start server for client test
-./../build/vdms -cfg unit_tests/config-tests.json > tests_screen.log 2> tests_log.log &
-cpp_unittest_pid=$!
+    cd ..

-./../build/vdms -cfg unit_tests/config-client-tests.json > tests_screen.log 2> tests_log.log &
-client_test_pid=$!
+    # Start server for client test
+    ./../build/vdms -cfg unit_tests/config-tests.json > tests_screen.log 2> tests_log.log &
+    cpp_unittest_pid=$!

-echo 'not the vdms application - this file is needed for shared key' > vdms
+    ./../build/vdms -cfg unit_tests/config-client-tests.json > tests_screen.log 2> tests_log.log &
+    client_test_pid=$!

-echo 'Running C++ tests...'
-./../build/tests/unit_tests \
-    --gtest_filter=-ImageTest.CreateNameTDB:ImageTest.NoMetadata:VideoTest.CreateUnique:VideoTest.SyncRemoteWrite:VideoTest.UDFWrite:Descriptors_Add.add_1by1_and_search_1k:RemoteConnectionTest.*
+    echo 'not the vdms application - this file is needed for shared key' > vdms

-pkill -9 -f udf_server.py
-pkill -9 -f udf_local.py
+    echo 'Running C++ tests...'
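+    # The '-' right after '=' below turns the whole list into exclusions: the
+    # TDB image cases, the remote video/UDF writes, the 1k descriptor search,
+    # and every RemoteConnectionTest case are skipped here; the
+    # RemoteConnectionTest cases run separately via run_aws_tests.sh.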
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 5520b073..b31ba01f 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -1,42 +1,73 @@
 #!/bin/bash -e
-sh cleandbs.sh || true
-mkdir test_db_client
-mkdir dbs  # necessary for Descriptors
-mkdir temp # necessary for Videos
-mkdir videos_tests
-mkdir backups
+# Variables used for storing the process ids of the vdms server and client
+cpp_unittest_pid='UNKNOWN_PROCESS_ID'
+client_test_pid='UNKNOWN_PROCESS_ID'
 
-# Stop UDF Queue and Remote Server if already running
-pkill -9 -f udf_server.py || true
-pkill -9 -f udf_local.py || true
+function execute_commands() {
+    sh cleandbs.sh || true
+    mkdir test_db_client
+    mkdir dbs  # necessary for Descriptors
+    mkdir temp # necessary for Videos
+    mkdir videos_tests
+    mkdir backups
 
-# Start remote server for test
-cd remote_function_test
-python3 -m pip install -r requirements.txt
-python3 udf_server.py 5010 > ../tests_remote_screen.log 2> ../tests_remote_log.log &
+    # Stop the UDF queue and the remote server if they are already running
+    pkill -9 -f udf_server.py || true
+    pkill -9 -f udf_local.py || true
 
-# Start UDF message queue for test
-cd ../udf_test
-python3 -m pip install -r requirements.txt
-python3 udf_local.py > ../tests_udf_screen.log 2> ../tests_udf_log.log &
+    # Start the remote server for the tests
+    cd remote_function_test
+    python3 -m pip install -r requirements.txt
+    python3 udf_server.py 5010 > ../tests_remote_screen.log 2> ../tests_remote_log.log &
 
-cd ..
+    # Start the UDF message queue for the tests
+    cd ../udf_test
+    python3 -m pip install -r requirements.txt
+    python3 udf_local.py > ../tests_udf_screen.log 2> ../tests_udf_log.log &
 
-# Start server for client test
-./../build/vdms -cfg unit_tests/config-tests.json > tests_screen.log 2> tests_log.log &
-cpp_unittest_pid=$!
+    cd ..
 
-./../build/vdms -cfg unit_tests/config-client-tests.json > tests_screen.log 2> tests_log.log &
-client_test_pid=$!
+    # Start the servers for the client tests
+    ./../build/vdms -cfg unit_tests/config-tests.json > tests_screen.log 2> tests_log.log &
+    cpp_unittest_pid=$!
 
-echo 'not the vdms application - this file is needed for shared key' > vdms
+    ./../build/vdms -cfg unit_tests/config-client-tests.json > tests_screen.log 2> tests_log.log &
+    client_test_pid=$!
 
-echo 'Running C++ tests...'
-./../build/tests/unit_tests \
-    --gtest_filter=-ImageTest.CreateNameTDB:ImageTest.NoMetadata:VideoTest.CreateUnique:VideoTest.SyncRemoteWrite:VideoTest.UDFWrite:Descriptors_Add.add_1by1_and_search_1k:RemoteConnectionTest.*
+    echo 'not the vdms application - this file is needed for shared key' > vdms
 
-pkill -9 -f udf_server.py
-pkill -9 -f udf_local.py
+    echo 'Running C++ tests...'
+    ./../build/tests/unit_tests \
+        --gtest_filter=-ImageTest.CreateNameTDB:ImageTest.NoMetadata:VideoTest.CreateUnique:VideoTest.SyncRemoteWrite:VideoTest.UDFWrite:Descriptors_Add.add_1by1_and_search_1k:RemoteConnectionTest.*
+    echo 'Finished'
+    exit 0
+}
 
-kill -9 $cpp_unittest_pid $client_test_pid || true
+# Cleanup function: kills the processes started by this script and
+# deletes the directories created by the script (or its tests)
+function cleanup() {
+    echo "Killing the udf_server and udf_local"
+    pkill -9 -f udf_server.py
+    pkill -9 -f udf_local.py
+
+    echo "Killing the vdms server and client"
+    kill -9 $cpp_unittest_pid $client_test_pid || true
+
+    # Clean up the temporary files created by the tests
+    echo 'Removing the temporary files created'
+    sh ./cleandbs.sh || true
+    exit 0
+}
+
+# Get the arguments passed to the script
+args=$@
+
+# These traps call the cleanup() function when any of these signals occurs
+trap cleanup EXIT
+trap cleanup ERR
+trap cleanup SIGINT
+
+# Execute the script commands
+execute_commands ${args}
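run_tests.sh takes no arguments; note that its gtest filter above deliberately excludes RemoteConnectionTest.*, which is exercised by run_aws_tests.sh instead:
```bash
# Run the local (non-AWS) test suite.
sh ./run_tests.sh
```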
diff --git a/tests/server/AddFindDescriptorSet.json b/tests/server/AddFindDescriptorSet.json
new file mode 100644
index 00000000..7131cddf
--- /dev/null
+++ b/tests/server/AddFindDescriptorSet.json
@@ -0,0 +1,18 @@
+[
+
+  {
+    "AddDescriptorSet": {
+      "engine": "FaissFlat",
+      "metric": "L2",
+      "name": "pretty_faces",
+      "dimensions": 128
+    }
+  },
+  {
+    "FindDescriptorSet": {
+      "set": "pretty_faces",
+      "storeIndex": true
+    }
+  }
+
+]
\ No newline at end of file
diff --git a/tests/server/json_queries.cc b/tests/server/json_queries.cc
index ce534a61..76d6422a 100644
--- a/tests/server/json_queries.cc
+++ b/tests/server/json_queries.cc
@@ -729,3 +729,54 @@ TEST(QueryHandler, AddUpdateFind_Blob) {
   VDMSConfig::destroy();
   PMGDQueryHandler::destroy();
 }
+TEST(QueryHandler, AddFind_DescriptorSet) {
+
+  Json::StyledWriter writer; // used only by the commented-out debug print below
+
+  std::ifstream ifile;
+  int fsize;
+  char *inBuf;
+  ifile.open("server/AddFindDescriptorSet.json", std::ifstream::in);
+  ifile.seekg(0, std::ios::end);
+  fsize = (int)ifile.tellg();
+  ifile.seekg(0, std::ios::beg);
+  inBuf = new char[fsize];
+  ifile.read(inBuf, fsize);
+  std::string json_query(inBuf, fsize); // explicit length: the buffer is not null-terminated
+  ifile.close();
+  delete[] inBuf;
+
+  Json::Reader reader;
+  Json::Value root;
+  Json::Value parsed;
+
+  VDMSConfig::init("unit_tests/config-tests.json");
+  PMGDQueryHandler::init();
+  QueryHandlerPMGD::init();
+
+  QueryHandlerPMGD qh_base;
+  qh_base.reset_autodelete_init_flag(); // set flag to show autodelete queue has
+                                        // been initialized
+  QueryHandlerPMGDTester query_handler(qh_base);
+
+  VDMS::protobufs::queryMessage proto_query;
+  proto_query.set_json(json_query);
+
+  VDMS::protobufs::queryMessage response;
+
+  query_handler.pq(proto_query, response);
+
+  reader.parse(response.json().c_str(), parsed);
+  // std::cout << writer.write(parsed) << std::endl;
+
+  // Verify the results returned
+  for (Json::ArrayIndex j = 0; j < parsed.size(); j++) {
+    const Json::Value &query = parsed[j];
+    ASSERT_EQ(query.getMemberNames().size(), 1);
+    std::string cmd = query.getMemberNames()[0];
+    EXPECT_EQ(query[cmd]["status"].asInt(), 0);
+  }
+
+  VDMSConfig::destroy();
+  PMGDQueryHandler::destroy();
+}
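With the test binary already built, the new handler test can be run on its own via a positive gtest filter (binary path as used by the test scripts above):
```bash
./../build/tests/unit_tests --gtest_filter=QueryHandler.AddFind_DescriptorSet
```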
diff --git a/tests/unit_tests/RemoteConnection_test.cc b/tests/unit_tests/RemoteConnection_test.cc
index 9b1191de..a740d3c1 100644
--- a/tests/unit_tests/RemoteConnection_test.cc
+++ b/tests/unit_tests/RemoteConnection_test.cc
@@ -27,21 +27,23 @@
  *
  */
 
-#include "Image.h"
-#include "TDBImage.h"
-#include "gtest/gtest.h"
-
-#include "RemoteConnection.h"
+#include
+#include "gtest/gtest.h"
 
 #include
 #include
 #include
-#include
+#include "Image.h"
+#include "TDBImage.h"
+
+#include "RemoteConnection.h"
+#include "VDMSConfig.h"
 
 class RemoteConnectionTest : public ::testing::Test {
 protected:
   virtual void SetUp() {
+    VDMS::VDMSConfig::init("unit_tests/config-aws-tests.json");
     img_ = "test_images/large1.jpg";
     tdb_img_ = "tdb/test_image.tdb";
     video_ = "test_videos/Megamind.avi";
@@ -54,6 +56,7 @@ class RemoteConnectionTest : public ::testing::Test {
   }
 
   virtual void TearDown() {
+    VDMS::VDMSConfig::destroy();
     connection_->end();
     delete connection_;
   }
diff --git a/tests/unit_tests/Video_test.cc b/tests/unit_tests/Video_test.cc
index 726f78d1..372fa29e 100644
--- a/tests/unit_tests/Video_test.cc
+++ b/tests/unit_tests/Video_test.cc
@@ -47,6 +47,8 @@
 
 #include "helpers.h"
 
+#include "VDMSConfig.h"
+
 using namespace std;
 
 class VideoTest : public ::testing::Test {
@@ -58,6 +60,8 @@ class VideoTest : public ::testing::Test {
   std::vector _frames_h264;
 
   virtual void SetUp() {
+
+    VDMS::VDMSConfig::init("unit_tests/config-tests.json");
     _video_path_avi_xvid = "videos/Megamind.avi";
     _video_path_mp4_h264 = "videos/Megamind.mp4";
@@ -88,6 +92,8 @@
     }
   }
 
+  virtual void TearDown() { VDMS::VDMSConfig::destroy(); }
+
   int get_fourcc() { return cv::VideoWriter::fourcc('H', '2', '6', '4'); }
 };
@@ -307,9 +313,11 @@ TEST_F(VideoTest, ReadMP4_H264) {
  */
 TEST_F(VideoTest, WriteMP4_H264) {
   try {
-    std::string temp_video_input("/tmp/video_test_WriteMP4_H264_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_WriteMP4_H264_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
-    std::string temp_video_test("/tmp/video_test_WriteMP4_H264_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_WriteMP4_H264_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test, get_fourcc());
 
     std::string write_output_vcl("videos_tests/write_test_vcl.mp4");
@@ -357,10 +365,12 @@
  */
 TEST_F(VideoTest, WriteAVI_XVID) {
   try {
-    std::string temp_video_input("/tmp/video_test_WriteAVI_XVID_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_WriteAVI_XVID_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input,
                        cv::VideoWriter::fourcc('X', 'V', 'I', 'D'));
-    std::string temp_video_test("/tmp/video_test_WriteAVI_XVID_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_WriteAVI_XVID_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test,
                        cv::VideoWriter::fourcc('X', 'V', 'I', 'D'));
@@ -415,9 +425,11 @@ TEST_F(VideoTest, ResizeWrite) {
   try {
-    std::string temp_video_input("/tmp/video_test_ResizeWrite_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_ResizeWrite_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
-    std::string temp_video_test("/tmp/video_test_ResizeWrite_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_ResizeWrite_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test, get_fourcc());
 
     std::string resize_name_vcl("videos_tests/resize_vcl.mp4");
@@ -490,9 +502,11 @@ TEST_F(VideoTest, IntervalWrite) {
   try {
-    std::string temp_video_input("/tmp/video_test_IntervalWrite_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_IntervalWrite_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
-    std::string temp_video_test("/tmp/video_test_IntervalWrite_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_IntervalWrite_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test, get_fourcc());
 
     std::string interval_name_vcl("videos_tests/interval_vcl.mp4");
@@ -620,9 +634,11 @@ TEST_F(VideoTest, ThresholdWrite) {
   try {
-    std::string temp_video_input("/tmp/video_test_ThresholdWrite_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_ThresholdWrite_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
-    std::string temp_video_test("/tmp/video_test_ThresholdWrite_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_ThresholdWrite_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test, get_fourcc());
 
     std::string threshold_name_vcl("videos_tests/threshold_vcl.mp4");
@@ -701,9 +717,11 @@ TEST_F(VideoTest, CropWrite) {
   try {
-    std::string temp_video_input("/tmp/video_test_CropWrite_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_CropWrite_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
-    std::string temp_video_test("/tmp/video_test_CropWrite_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_CropWrite_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test, get_fourcc());
 
     std::string crop_name_vcl("videos_tests/crop_vcl.mp4");
@@ -781,9 +799,11 @@ TEST_F(VideoTest, SyncRemoteWrite) {
   try {
-    std::string temp_video_input("/tmp/video_test_SyncRemoteWrite_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_SyncRemoteWrite_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
-    std::string temp_video_test("/tmp/video_test_SyncRemoteWrite_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_SyncRemoteWrite_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test, get_fourcc());
 
     std::string syncremote_name_vcl("videos_tests/syncremote_vcl.mp4");
@@ -860,9 +880,11 @@ TEST_F(VideoTest, UDFWrite) {
   try {
-    std::string temp_video_input("/tmp/video_test_UDFWrite_input.avi");
+    std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                 "/video_test_UDFWrite_input.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
-    std::string temp_video_test("/tmp/video_test_UDFemoteWrite_test.avi");
+    std::string temp_video_test(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                                "/video_test_UDFemoteWrite_test.avi");
     copy_video_to_temp(_video_path_avi_xvid, temp_video_test, get_fourcc());
 
     std::string udf_name_vcl("videos_tests/udf_vcl.mp4");
@@ -936,7 +958,8 @@ TEST_F(VideoTest, VideoLoopTest) {
   _options["text"] = "Video";
   _options["id"] = "caption";
 
-  std::string temp_video_input("/tmp/video_test_VideoLoopTest_input.avi");
+  std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                               "/video_test_VideoLoopTest_input.avi");
   copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
 
   std::string vloop_name_vcl("videos_tests/vloop_vcl.mp4");
@@ -991,8 +1014,8 @@ TEST_F(VideoTest, VideoLoopPipelineTest) {
   int end = 100;
   int step = 5;
 
-  std::string temp_video_input(
-      "/tmp/video_test_VideoLoopPipelineTest_input.avi");
+  std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                               "/video_test_VideoLoopPipelineTest_input.avi");
   copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
 
   std::string vloop_name_vcl("videos_tests/vloop_vcl.mp4");
@@ -1043,7 +1066,8 @@ TEST_F(VideoTest, VideoLoopTestError) {
   _options["text"] = "Video";
   _options["id"] = "caption";
 
-  std::string temp_video_input("/tmp/video_test_VideoLoopTestError_input.avi");
+  std::string temp_video_input(VDMS::VDMSConfig::instance()->get_path_tmp() +
+                               "/video_test_VideoLoopTestError_input.avi");
   copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
 
   std::string vloop_name_vcl("videos_tests/vloop_vcl.mp4");
@@ -1084,7 +1108,8 @@ TEST_F(VideoTest, VideoLoopSyncRemoteTestError) {
   _options["id"] = "caption";
 
   std::string temp_video_input(
-      "/tmp/video_test_VideoLoopSyncRemoteTestError_input.avi");
+      VDMS::VDMSConfig::instance()->get_path_tmp() +
+      "/video_test_VideoLoopSyncRemoteTestError_input.avi");
   copy_video_to_temp(_video_path_avi_xvid, temp_video_input, get_fourcc());
 
   std::string vloop_name_vcl("videos_tests/vloop_vcl.mp4");
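The AWS test config diffed below gains two new keys; a server can be pointed at it the same way the test scripts above launch vdms. A minimal sketch (log file names are illustrative):
```bash
# Launch vdms against the AWS/MinIO test config in the background.
./../build/vdms -cfg unit_tests/config-aws-tests.json > aws_screen.log 2> aws_log.log &
```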
diff --git a/tests/unit_tests/config-aws-tests.json b/tests/unit_tests/config-aws-tests.json
index 23f50bb2..ffc3c49e 100644
--- a/tests/unit_tests/config-aws-tests.json
+++ b/tests/unit_tests/config-aws-tests.json
@@ -7,5 +7,7 @@
     "db_root_path": "test_db_1",
     "storage_type": "aws", //local, aws, etc
     "bucket_name": "minio-bucket",
-    "more-info": "github.com/IntelLabs/vdms"
+    "more-info": "github.com/IntelLabs/vdms",
+    "aws_log_level": "debug",
+    "use_endpoint": true
 }
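For context, a minimal request that satisfies the new FindDescriptorSet definition added below; only "set" is required, and the set name here matches the test fixture above (the output file name is illustrative):
```bash
cat > find_descriptor_set.json <<'EOF'
[
  { "FindDescriptorSet": { "set": "pretty_faces", "storeIndex": true } }
]
EOF
```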
"#/definitions/blockResults" }, + "set": { "type": "string" }, + "storeIndex" : { "type": "boolean" }, + "constraints": { "type": "object" }, + "link": { "$ref": "#/definitions/blockLink" } + + }, + "required": ["set"], + + "additionalProperties": false + }, "AddDescriptor": { "properties": { diff --git a/utils/src/stats/SystemStats.cc b/utils/src/stats/SystemStats.cc index 45353bb9..00b8cd00 100644 --- a/utils/src/stats/SystemStats.cc +++ b/utils/src/stats/SystemStats.cc @@ -30,7 +30,6 @@ #include "sys/sysinfo.h" #include "sys/times.h" #include "sys/types.h" -#include "sys/vtimes.h" #include "stdio.h" #include "stdlib.h"