diff --git a/INSTALL.md b/INSTALL.md index 1ca97bcc..3dba31c5 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -29,22 +29,20 @@ * This is a headers-only library, no compilation/installation necessary * Persistent Memory Graph Database (PMGD) - * Download version 1.0.0 from: https://github.com/IntelLabs/pmgd/releases + * Download version 2.0.0 from: https://github.com/IntelLabs/pmgd/releases * Follow installation instructions * Visual Compute Library - * Download version 0.1.0 from: https://github.com/IntelLabs/vcl/releases + * Download version 1.0.0 from: https://github.com/IntelLabs/vcl/releases * Follow installation instructions -### Requirement for Python Client +### Python Client Module - sudo apt-get install python-pip - pip install protobuf (may need to run as sudo) +VDMS offers the Python Client Module through the pip package manager, +and it is compatible with Python 2.7+ and 3.3+. +pip (or pip2 and pip3) will automatically install dependencies (protobuf). - Add VDMS Python module to PYPATH: - export PYTHONPATH="${PYTHONPATH}:/client/python/vdms" - # Example: - export PYTHONPATH="${PYTHONPATH}:/opt/intel/vdms/client/python/vdms" + pip install vdms ### Compilation @@ -56,8 +54,8 @@ Flag | Explanation ------------ | ------------- ---no-server | Compiles client libraries (C++/Python) only. (will not compile neither server not tests) ---timing | Compiles server with chronos for internal timing. +--no-server | Compiles client libraries (C++/Python) only. (will compile neither server nor tests) +--timing | Compiles server with chronos for internal timing, experimental. -jX | Compiles in parallel, using X cores INTEL_PATH=path | Path to the root folder containing pmgd and vcl. Default is "./" which is pmgd and vcl inside vdms folder. 
Example: scons INTEL_PATH=/opt/intel/ @@ -68,8 +66,6 @@ Some of the parameters include the TCP port that will be use for incoming connections, maximun number of simultaneous clients, and paths to the folders where data/metadata will be stored. -**Note:** The folders must already exists in the filesystem. - We provide a script (run_server.sh) that will create some default directories, corresponding the default values in the config-vdms.json. diff --git a/README.md b/README.md index ae7dd952..e79dc3dd 100644 --- a/README.md +++ b/README.md @@ -29,9 +29,6 @@ propose a storage architecture designed for efficient visual data access that exploits next generation hardware and give preliminary results showing how it enables efficient vision analytics. -Here is our [ATC HotStorage '17 Position Paper](https://www.usenix.org/system/files/conference/hotstorage17/hotstorage17-paper-gupta-cledat.pdf). -Also, take a look at our [presentation](https://www.usenix.org/conference/hotstorage17/program/presentation/gupta-cledat). - ## Get Started To get started, take a look at the [INSTALL.md](INSTALL.md) file, where @@ -41,16 +38,9 @@ Also, visit our [wiki](https://github.com/IntelLabs/vdms/wiki) to learn more about the VDMS API, and take a look at some of the examples/tutorials. -## Cite - -Feel free to include our work into your research! 
+## Academic Papers - @inproceedings {203374, - author = {Vishakha Gupta-Cledat and Luis Remis and Christina R Strong}, - title = {Addressing the Dark Side of Vision Research: Storage}, - booktitle = {9th {USENIX} Workshop on Hot Topics in Storage and File Systems (HotStorage 17)}, - year = {2017}, - address = {Santa Clara, CA}, - url = {https://www.usenix.org/conference/hotstorage17/program/presentation/gupta-cledat}, - publisher = {{USENIX} Association}, - } +Conference | Links, Cite | Description +------------ | ------------- | ------------- +Learning Systems @ NIPS 2018 | [Paper](https://export.arxiv.org/abs/1810.11832), [Cite](https://dblp.uni-trier.de/rec/bibtex/journals/corr/abs-1810-11832) | Systems for Machine Learning [Workshop](http://learningsys.org/nips18/cfp.html) @ NIPS +HotStorage @ ATC 2017 | [Paper](https://www.usenix.org/conference/hotstorage17/program/presentation/gupta-cledat), [Presentation](https://www.usenix.org/conference/hotstorage17/program/presentation/gupta-cledat), [Cite](https://www.usenix.org/biblio/export/bibtex/203374)| Positioning Paper at USENIX ATC 2017 Workshop diff --git a/SConstruct b/SConstruct index b1c12efd..1652779e 100644 --- a/SConstruct +++ b/SConstruct @@ -18,7 +18,7 @@ def buildServer(env): os.path.join(env['INTEL_PATH'], 'vcl/src'), ], LIBS = [ 'pmgd', 'pmgd-util', - 'jsoncpp', 'protobuf', + 'jsoncpp', 'protobuf', 'tbb', 'vdms-utils', 'vcl', 'pthread', ], LIBPATH = ['/usr/local/lib/', 'utils/', @@ -35,24 +35,28 @@ def buildServer(env): 'src/QueryHandler.cc', 'src/QueryMessage.cc', 'src/CommunicationManager.cc', + 'src/ExceptionsCommand.cc', 'src/PMGDQuery.cc', 'src/SearchExpression.cc', 'src/PMGDIterators.cc', 'src/PMGDQueryHandler.cc', 'src/RSCommand.cc', 'src/ImageCommand.cc', - 'src/ExceptionsCommand.cc', + 'src/DescriptorsManager.cc', + 'src/DescriptorsCommand.cc', + 'src/BoundingBoxCommand.cc', + 'src/VideoCommand.cc', ] - vdms = env.Program('vdms', vdms_server_files) + env.Program('vdms', vdms_server_files) # 
Set INTEL_PATH. First check arguments, then enviroment, then default if ARGUMENTS.get('INTEL_PATH', '') != '': - intel_path = ARGUMENTS.get("INTEL_PATH", '') + intel_path = ARGUMENTS.get("INTEL_PATH", '') elif os.environ.get('INTEL_PATH', '') != '': - intel_path = os.environ.get('INTEL_PATH', '') + intel_path = os.environ.get('INTEL_PATH', '') else: - intel_path = os.getcwd() + intel_path = os.getcwd() # Enviroment use by all the builds env = Environment(CXXFLAGS="-std=c++11 -O3") @@ -63,6 +67,6 @@ SConscript(os.path.join('utils', 'SConscript'), exports=['env']) SConscript(os.path.join('client/cpp','SConscript'), exports=['env']) if GetOption('no-server'): - buildServer(env) - # Build tests only if server is built - SConscript(os.path.join('tests', 'SConscript'), exports=['env']) + buildServer(env) + # Build tests only if server is built + SConscript(os.path.join('tests', 'SConscript'), exports=['env']) diff --git a/client/cpp/VDMSClient.cc b/client/cpp/VDMSClient.cc index 0986e14d..3e7dd159 100644 --- a/client/cpp/VDMSClient.cc +++ b/client/cpp/VDMSClient.cc @@ -38,15 +38,15 @@ const string VDMSClient::query(const string &json) protobufs::queryMessage cmd; cmd.set_json(json); - std::basic_string msg(cmd.ByteSize(),0); - cmd.SerializeToArray((void*)msg.data(), msg.length()); - _conn.send_message(msg.data(), msg.length()); + std::basic_string msg(cmd.ByteSize(),0); + cmd.SerializeToArray((void*)msg.data(), msg.length()); + _conn.send_message(msg.data(), msg.length()); // Wait now for response // TODO: Perhaps add an asynchronous version too. 
- msg = _conn.recv_message(); - protobufs::queryMessage resp; - resp.ParseFromArray((const void*)msg.data(), msg.length()); + msg = _conn.recv_message(); + protobufs::queryMessage resp; + resp.ParseFromArray((const void*)msg.data(), msg.length()); return resp.json(); } @@ -56,19 +56,20 @@ const string VDMSClient::query(const string &json, const vector blobs) protobufs::queryMessage cmd; cmd.set_json(json); - for (auto it : blobs) { + for (auto& it : blobs) { string *blob = cmd.add_blobs(); - blob = it; + *blob = *it; } - std::basic_string msg(cmd.ByteSize(),0); - cmd.SerializeToArray((void*)msg.data(), msg.length()); - _conn.send_message(msg.data(), msg.length()); + + std::basic_string msg(cmd.ByteSize(),0); + cmd.SerializeToArray((void*)msg.data(), msg.length()); + _conn.send_message(msg.data(), msg.length()); // Wait now for response // TODO: Perhaps add an asynchronous version too. - msg = _conn.recv_message(); - protobufs::queryMessage resp; - resp.ParseFromArray((const void*)msg.data(), msg.length()); + msg = _conn.recv_message(); + protobufs::queryMessage resp; + resp.ParseFromArray((const void*)msg.data(), msg.length()); return resp.json(); } diff --git a/client/python/vdms/__init__.py b/client/python/vdms/__init__.py index e69de29b..1ec484f1 100644 --- a/client/python/vdms/__init__.py +++ b/client/python/vdms/__init__.py @@ -0,0 +1,4 @@ +name = "vdms" + +from .vdms import * + diff --git a/client/python/vdms/vdms.py b/client/python/vdms/vdms.py index 73668597..a5e26053 100644 --- a/client/python/vdms/vdms.py +++ b/client/python/vdms/vdms.py @@ -35,14 +35,18 @@ import json # VDMS Protobuf import (autogenerated) -import queryMessage_pb2 +from . 
import queryMessage_pb2 -class VDMS(object): +class vdms(object): def __init__(self): self.dataNotUsed = [] self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.connected = False + self.last_response = '' + + def __del__(self): + self.conn.close() def connect(self, host='localhost', port=55555): self.conn.connect((host, port)) @@ -81,12 +85,10 @@ def query(self, query, img_array = []): # end = time.time() # print "ATcomm[ms]:" + str((end - start)*1000) - # time.sleep(1) - # Recieve response recv_len = self.conn.recv(4) recv_len = struct.unpack('@I', recv_len)[0] - response = '' + response = b'' while len(response) < recv_len: packet = self.conn.recv(recv_len - len(response)) if not packet: @@ -100,14 +102,12 @@ def query(self, query, img_array = []): for b in querRes.blobs: img_array.append(b) - return (querRes.json, img_array) + self.last_response = json.loads(querRes.json) + + return (self.last_response, img_array) -# Aux functions for printing JSON queries/responses -def aux_print_json(data): - # Check the query type - if isinstance(data, str): # assumes string - json_obj = json.loads(data) - else: - json_obj = data + def get_last_response(self): + return self.last_response - print json.dumps(json_obj, indent=4, sort_keys=False) + def get_last_response_str(self): + return json.dumps(self.last_response, indent=4, sort_keys=False) diff --git a/config-vdms.json b/config-vdms.json index c0db7a85..ee3e0d21 100644 --- a/config-vdms.json +++ b/config-vdms.json @@ -3,15 +3,11 @@ // Sets database paths and other parameters { // Network - "port": 55555, // Default is 55555 - "max_simultaneous_clients": 20, // Default is 500 + "port": 55555, + "max_simultaneous_clients": 100, // Database paths - "pmgd_path": "db/graph", // This will be an IP address in the future - "png_path": "db/images/pngs/", - "jpg_path": "db/images/jpgs/", - "tdb_path": "db/images/tiledb/tdb/", + "db_root_path": "db", - "support_info": "a-team@intel.com", - "support_phone": 
"1-800-A-TEAM" + "more-info": "github.com/IntelLabs/vdms" } diff --git a/docker/Dockerfile b/docker/Dockerfile index 77e260f5..da6f0b84 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,80 +1,106 @@ # Pull base image. FROM ubuntu:16.04 -RUN apt-get -qq update -RUN apt-get -qq install -y python python-dev python-pip python-virtualenv -RUN apt-get -qq install -y bzip2 -RUN apt-get install --no-install-recommends -y libjsoncpp-dev build-essential cmake git pkg-config libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler libatlas-base-dev libboost-all-dev libgflags-dev libgoogle-glog-dev liblmdb-dev libgflags-dev libgoogle-glog-dev liblmdb-dev -RUN apt-get install -y scons libjsoncpp-dev flex javacc libbison-dev openjdk-8-jdk -RUN apt-get install -y autoconf automake libtool curl make g++ unzip -RUN apt-get install -y cmake wget git zlib1g-dev libbz2-dev libssl-dev liblz4-dev mpich -RUN apt-get install -y libjpeg8-dev libtiff5-dev libjasper-dev libpng12-dev libgtk-3-dev -RUN apt-get install -y zlib1g-dev -RUN apt-get install -y libbz2-dev -RUN apt-get install -y libssl-dev -RUN apt-get install -y liblz4-dev -RUN apt-get install -y autoconf automake libtool curl make g++ unzip -RUN apt-get install -y libopenmpi-dev -RUN apt-get install -y libgtest-dev -RUN apt-get install -y ed -RUN apt-get install -y libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev -RUN apt-get install -y libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev -RUN pip install scons numpy +RUN apt-get update && apt-get upgrade -y && \ + apt-get install -y \ + build-essential scons autoconf automake libtool curl make g++ unzip \ + bzip2 cmake git pkg-config \ + python python-dev python-pip python-virtualenv \ + wget zlib1g-dev libbz2-dev libssl-dev liblz4-dev mpich \ + libjsoncpp-dev flex javacc libbison-dev openjdk-8-jdk \ + libleveldb-dev libsnappy-dev libhdf5-serial-dev \ + libatlas-base-dev libboost-all-dev 
libgflags-dev libgoogle-glog-dev \ + liblmdb-dev \ + libjpeg8-dev libtiff5-dev libjasper-dev libpng12-dev libgtk-3-dev \ + libopenmpi-dev \ + libgtest-dev ed \ + libgtk2.0-dev pkg-config libavcodec-dev \ + libavformat-dev libswscale-dev \ + libtbb2 libtbb-dev libjpeg-dev libpng-dev \ + libtiff-dev libdc1394-22-dev libopenblas-dev + RUN apt-get remove -y libprotobuf-dev protobuf-compiler -RUN cd / && git clone https://github.com/google/protobuf.git -RUN cd protobuf && git checkout v3.3.0 && ./autogen.sh && ./configure --prefix=/usr/local && make -j $(cat /proc/cpuinfo | wc -l) && make install && ldconfig + +# Google Test +RUN cd /usr/src/gtest && cmake . && make && mv libgtest* /usr/lib/ + +# Protobuf +RUN cd / && git clone https://github.com/google/protobuf.git && \ + cd protobuf && git checkout v3.6.1 && ./autogen.sh && \ + ./configure --prefix=/usr/local && \ + make -j $(cat /proc/cpuinfo | wc -l) && \ + make install && ldconfig RUN rm -rf /protobuf +# OpenCV +RUN mkdir /opencv && cd /opencv && \ + wget -O opencv.zip https://github.com/opencv/opencv/archive/3.3.0.zip && \ + unzip opencv.zip && cd opencv-3.3.0 && \ + mkdir build && cd build && \ + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_INSTALL_PREFIX=/usr/local .. && \ + make -j $(cat /proc/cpuinfo | wc -l) && make install +RUN rm -rf /opencv + # Blosc -RUN cd / && git clone https://github.com/Blosc/c-blosc.git -RUN cd c-blosc && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX='/usr' .. && cmake --build . && ctest && cmake --build . --target install +RUN cd / && git clone https://github.com/Blosc/c-blosc.git && \ + cd c-blosc && mkdir build && cd build && \ + cmake -DCMAKE_INSTALL_PREFIX='/usr' .. && cmake --build . && \ + cmake --build . --target install RUN rm -rf /c-blosc # Zstd -RUN cd / && wget https://github.com/facebook/zstd/archive/v1.1.0.tar.gz && tar xf v1.1.0.tar.gz -RUN cd zstd-1.1.0 && make install PREFIX='/usr' -RUN cd /usr/src/gtest && cmake . 
&& make && mv libgtest* /usr/lib/ +RUN cd / && wget https://github.com/facebook/zstd/archive/v1.1.0.tar.gz && \ + tar xf v1.1.0.tar.gz && cd zstd-1.1.0 && \ + make install PREFIX='/usr' # TileDB -RUN cd / && wget https://github.com/TileDB-Inc/TileDB/archive/0.6.1.tar.gz && tar xf 0.6.1.tar.gz -RUN cd TileDB-0.6.1 && mkdir build && cd build && cmake .. && make -j $(cat /proc/cpuinfo | wc -l) && make install -RUN rm -rf /TileDB-0.6.1 +RUN cd / && wget https://github.com/TileDB-Inc/TileDB/archive/1.3.1.tar.gz && \ + tar xf 1.3.1.tar.gz && cd TileDB-1.3.1 && mkdir build && cd build && \ + ../bootstrap --prefix=/usr/local/ && make -j && make install-tiledb +RUN rm -rf /TileDB-1.3.1 -# Valijson -RUN cd / && git clone https://github.com/tristanpenman/valijson.git -RUN cd valijson && cp -r include/* /usr/local/include/ +# Faiss +RUN cd / && \ + wget https://github.com/facebookresearch/faiss/archive/v1.4.0.tar.gz && \ + tar xf v1.4.0.tar.gz && \ + cd faiss-1.4.0 && rm CMakeLists.txt && \ + wget https://gist.githubusercontent.com/luisremis/758c71c6a86cb3e4fe2311c415e07547/raw/3c51b5ed7d3f16a4d684d9ea524602c9f1f8593a/CMakeLists.txt && \ + mkdir build && cd build && cmake ../ && make -j +RUN mkdir /usr/local/include/faiss/ +RUN cp -r faiss-1.4.0/* /usr/local/include/faiss/ +RUN mv faiss-1.4.0/build/lib/libfaiss.so /usr/local/lib/ +RUN rm -rf faiss-1.4* -# OpenCV -RUN mkdir /opencv && cd /opencv && wget -O opencv.zip https://github.com/opencv/opencv/archive/3.3.0.zip -RUN cd /opencv && unzip opencv.zip && cd opencv-3.3.0 && mkdir build && cd build && cmake -D CMAKE_BUILD_TYPE=Release -D CMAKE_INSTALL_PREFIX=/usr/local .. 
&& make -j $(cat /proc/cpuinfo | wc -l) && make install && rm -rf /opencv +# PMGD install +RUN cd / && wget https://github.com/intellabs/pmgd/archive/v2.0.0.tar.gz && \ + tar xf v2.0.0.tar.gz && mv pmgd-2.0.0 pmgd && cd pmgd && \ + make PMOPT=MSYNC # VCL install -RUN cd / && wget https://github.com/intellabs/vcl/archive/v0.1.0.tar.gz && tar xf v0.1.0.tar.gz -RUN mv vcl-0.1.0 vcl && cd vcl && scons -j16 +RUN cd / && wget https://github.com/intellabs/vcl/archive/v1.0.0.tar.gz && \ + tar xf v1.0.0.tar.gz && mv vcl-1.0.0 vcl && cd vcl && scons -j16 -# PMGD install -RUN cd / && wget https://github.com/intellabs/pmgd/archive/v1.0.0.tar.gz && tar xf v1.0.0.tar.gz -RUN mv pmgd-1.0.0 pmgd && cd pmgd && make -RUN rm -rf /*.tar.gz +# Valijson +RUN cd / && git clone https://github.com/tristanpenman/valijson.git && \ + cd valijson && cp -r include/* /usr/local/include/ # VDMS install -RUN cd / && wget https://github.com/intellabs/vdms/archive/v1.0.0.tar.gz && tar xf v1.0.0.tar.gz -RUN mv vdms-1.0.0 vdms && cd vdms && mkdir db && scons -j16 INTEL_PATH=/ +RUN git clone https://github.com/intellabs/vdms/archive/v2.0.0.tar.gz && \ + tar xf v2.0.0.tar.gz && mv vdms-2.0.0 vdms && \ + cd vdms && scons -j16 INTEL_PATH=/ RUN rm -rf /*.tar.gz RUN rm -rf /var/lib/apt/lists/* /root/.cache +# Setup entry point + RUN echo '#!/bin/bash' > /start.sh -RUN echo 'export LD_LIBRARY_PATH=/pmgd/lib:$(find /usr/local/lib/ / -type f -name "*.so" | xargs dirname | sort | uniq | tr "\n" ":")' >> /start.sh +RUN echo 'export LD_LIBRARY_PATH=/vdms/utils:/pmgd/lib:$(find /usr/local/lib/ / -type f -name "*.so" | xargs dirname | sort | uniq | tr "\n" ":")' >> /start.sh RUN echo 'cd /vdms' >> /start.sh RUN echo 'rm -rf db' >> /start.sh -RUN echo 'mkdir db' >> /start.sh -RUN echo 'mkdir db/images' >> /start.sh -RUN echo 'mkdir db/images/pngs' >> /start.sh -RUN echo 'mkdir db/images/jpgs' >> /start.sh -RUN echo 'mkdir db/images/tiledb' >> /start.sh -RUN echo 'mkdir db/images/tiledb/tdb' >> /start.sh RUN echo 
'./vdms' >> /start.sh RUN chmod 755 /start.sh -ENTRYPOINT "/start.sh" +EXPOSE 55555 +CMD ["/start.sh"] diff --git a/examples/pyClient/sendQueryFromFile.py b/examples/pyClient/sendQueryFromFile.py deleted file mode 100644 index 8ff4715b..00000000 --- a/examples/pyClient/sendQueryFromFile.py +++ /dev/null @@ -1,50 +0,0 @@ -# -# The MIT License -# -# @copyright Copyright (c) 2017 Intel Corporation -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, -# merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -#! 
/usr/bin/python -from threading import Thread -import sys -import os -import urllib -import time -import json -import unittest - -import vdms - - -if len(sys.argv) != 2: - print "You must provide a json file" -else: - hostname = "localhost" - - db = vdms.VDMS() - db.connect(hostname) - - with open(sys.argv[1]) as json_file: - query = json.load(json_file) - - response, img_array = db.query(query) - print vdms.aux_print_json(response) \ No newline at end of file diff --git a/examples/pyClient/threadClient.py b/examples/pyClient/threadClient.py deleted file mode 100644 index 4359a1b9..00000000 --- a/examples/pyClient/threadClient.py +++ /dev/null @@ -1,45 +0,0 @@ -# -# The MIT License -# -# @copyright Copyright (c) 2017 Intel Corporation -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, -# merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -#! 
/usr/bin/python -from threading import Thread -import sys -import os -import urllib -import time - -import vdms # Import vdms - -def clientThread(thId): - - response = vdms.query( - "{HERE GOES YOUR JSON QUERY from Th " + str(thId) + "}") - - print "Thread " + str(thId) + ": " + response - -for i in range(1,1000): - thread = Thread(target=clientThread,args=(i,) ) - thread.start() - diff --git a/run_server.sh b/run_server.sh index f18529b9..8ad1fe07 100644 --- a/run_server.sh +++ b/run_server.sh @@ -1,8 +1,5 @@ rm log.log -rm -r db -mkdir db -mkdir db/images -mkdir db/images/pngs -mkdir db/images/jpgs ./vdms 2> log.log + +# If problems with initialization, try deleting db folder. diff --git a/src/BoundingBoxCommand.cc b/src/BoundingBoxCommand.cc new file mode 100644 index 00000000..44dbf201 --- /dev/null +++ b/src/BoundingBoxCommand.cc @@ -0,0 +1,372 @@ +/** + * @file BoundingBoxCommand.cc + * + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#include + +#include "BoundingBoxCommand.h" +#include "VDMSConfig.h" +#include "defines.h" + +using namespace VDMS; + +//========= AddBoundingBox definitions ========= + +AddBoundingBox::AddBoundingBox() : RSCommand("AddBoundingBox") +{ +} + +int AddBoundingBox::construct_protobuf(PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + int node_ref = get_value(cmd, "_ref", + query.get_available_reference()); + + Json::Value rect = get_value(cmd, "rectangle"); + Json::Value props = get_value(cmd, "properties"); + + props[VDMS_ROI_COORD_X_PROP] = get_value(rect, "x"); + props[VDMS_ROI_COORD_Y_PROP] = get_value(rect, "y"); + props[VDMS_ROI_WIDTH_PROP] = get_value(rect, "w"); + props[VDMS_ROI_HEIGHT_PROP] = get_value(rect, "h"); + + // Add Region node + query.AddNode(node_ref, VDMS_ROI_TAG, props, Json::Value()); + + if ( cmd.isMember("image") ) { + Json::Value img; + img["ref"] = cmd["image"]; + add_link(query, img, node_ref, VDMS_ROI_IMAGE_EDGE); + } + + if ( cmd.isMember("link") ) { + Json::Value ent; + ent = cmd["link"]; + add_link(query, ent, node_ref, VDMS_ROI_EDGE_TAG); + } + + return 0; +} + +//========= UpdateBoundingBox definitions ========= + +UpdateBoundingBox::UpdateBoundingBox() : RSCommand("UpdateBoundingBox") +{ +} + +int UpdateBoundingBox::construct_protobuf(PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + Json::Value rect = get_value(cmd, "rectangle", Json::Value::null); + Json::Value props = get_value(cmd, "properties"); + + if (rect != 
Json::Value::null) { + props[VDMS_ROI_COORD_X_PROP] = get_value(rect, "x"); + props[VDMS_ROI_COORD_Y_PROP] = get_value(rect, "y"); + props[VDMS_ROI_WIDTH_PROP] = get_value(rect, "w"); + props[VDMS_ROI_HEIGHT_PROP] = get_value(rect, "h"); + } + + // Update Bounding Box + query.UpdateNode(get_value(cmd, "_ref", -1), + VDMS_ROI_TAG, + props, + cmd["remove_props"], + cmd["constraints"], + get_value(cmd, "unique", false)); + + return 0; +} + +//========= FindBoundingBox definitions ========= + +FindBoundingBox::FindBoundingBox() : RSCommand("FindBoundingBox") +{ +} + +int FindBoundingBox::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + // if blob is true, make sure we have a reference, that way we can iterate + // over the bounding boxes and find the link to the image (if it exists) + Json::Value results = get_value(cmd, "results"); + int ref = get_value(cmd, "_ref", query.get_available_reference()); + + bool coords = false; + if (results.isMember("list")) { + for (int i = 0; i < results["list"].size(); ++i) { + if (results["list"][i].asString() == "_coordinates") { + Json::Value aux; + results["list"].removeIndex(i, &aux); + coords = true; + break; + } + } + } + + if (get_value(results, "blob")) + coords = true; + + if (coords) { + results["list"].append(VDMS_ROI_COORD_X_PROP); + results["list"].append(VDMS_ROI_COORD_Y_PROP); + results["list"].append(VDMS_ROI_WIDTH_PROP); + results["list"].append(VDMS_ROI_HEIGHT_PROP); + } + + Json::Value link; + if (cmd.isMember("image")) { + link["ref"] = get_value(cmd, "image", -1); + link["class"] = VDMS_ROI_IMAGE_EDGE; + } + else + link = cmd["link"]; + + Json::Value constraints = cmd["constraints"]; + if (cmd.isMember("rectangle")) { + Json::Value rect = get_value(cmd, "rectangle", -1); + constraints[VDMS_ROI_COORD_X_PROP].append(">="); + 
constraints[VDMS_ROI_COORD_X_PROP].append(get_value(rect, "x")); + constraints[VDMS_ROI_COORD_X_PROP].append("<="); + constraints[VDMS_ROI_COORD_X_PROP].append(get_value(rect, "w")); + + constraints[VDMS_ROI_COORD_Y_PROP].append(">="); + constraints[VDMS_ROI_COORD_Y_PROP].append(get_value(rect, "y")); + constraints[VDMS_ROI_COORD_Y_PROP].append("<="); + constraints[VDMS_ROI_COORD_Y_PROP].append(get_value(rect, "h")); + + constraints[VDMS_ROI_WIDTH_PROP].append("<="); + constraints[VDMS_ROI_WIDTH_PROP].append(get_value(rect, "w") + + get_value(rect, "x")); + + constraints[VDMS_ROI_HEIGHT_PROP].append("<="); + constraints[VDMS_ROI_HEIGHT_PROP].append(get_value(rect, "h") + + get_value(rect, "y")); + } + + query.QueryNode( + ref, + VDMS_ROI_TAG, + link, + constraints, + results, + get_value(cmd, "unique", false) + ); + + if (get_value(results, "blob", false)) { + Json::Value imgresults; + imgresults["list"].append(VDMS_IM_PATH_PROP); + + Json::Value imglink; + imglink["ref"] = ref; + + query.QueryNode( + -1, + VDMS_IM_TAG, + imglink, + Json::Value::null, + imgresults, + get_value(cmd, "unique", false) + ); + } + + return 0; +} + +Json::Value FindBoundingBox::construct_responses( + Json::Value& responses, + const Json::Value& json, + protobufs::queryMessage &query_res, + const std::string &blob) +{ + const Json::Value& cmd = json[_cmd_name]; + const Json::Value& results = cmd["results"]; + + Json::Value ret; + + auto error = [&](Json::Value& res) + { + ret[_cmd_name] = res; + return ret; + }; + + if (responses.size() == 0) { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "PMGD Found Nothing when Looking for BoundingBoxes"; + return error(return_error); + } + + Json::Value& findBB = responses[0]; + + if (findBB["status"] != 0) { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "BoundingBox Not Found"; + return error(return_error); + } + + Json::Value entities = 
findBB["entities"]; + findBB.removeMember("entities"); + + for ( int i = 0; i < entities.size(); ++i ) { + auto ent = entities[i]; + Json::Value bb; + + Json::Value coords; + if (ent.isMember(VDMS_ROI_COORD_X_PROP) && + ent.isMember(VDMS_ROI_COORD_Y_PROP) && + ent.isMember(VDMS_ROI_WIDTH_PROP) && + ent.isMember(VDMS_ROI_HEIGHT_PROP)) { + coords["x"] = ent[VDMS_ROI_COORD_X_PROP]; + coords["y"] = ent[VDMS_ROI_COORD_Y_PROP]; + coords["w"] = ent[VDMS_ROI_WIDTH_PROP]; + coords["h"] = ent[VDMS_ROI_HEIGHT_PROP]; + } + + if (results.isMember("list")) { + for (int i = 0; i < results["list"].size(); ++i) { + auto current = results["list"][i].asString(); + if ( current == "_coordinates") { + bb["_coordinates"] = coords; + } + else { + bb[current] = ent[current]; + } + } + } + + if (get_value(results, "blob", false)) { + if (responses.size() < 1) { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "BoundingBox is Missing Corresponding Blob"; + return error(return_error); + } + + Json::Value& findImage = responses[1]; + if (findImage["status"] != 0) { + findImage["status"] = RSCommand::Error; + // Uses PMGD info error. + error(findImage); + } + if (findImage["entities"].size() <= i) + continue; + else { + bool flag_empty = true; + + auto img_ent = findImage["entities"][i]; + + assert(img_ent.isMember(VDMS_IM_PATH_PROP)); + std::string im_path = img_ent[VDMS_IM_PATH_PROP].asString(); + img_ent.removeMember(VDMS_IM_PATH_PROP); + + if (img_ent.getMemberNames().size() > 0) { + flag_empty = false; + } + + try { + VCL::Image img(im_path); + img.crop(VCL::Rectangle( + get_value(coords, "x"), + get_value(coords, "y"), + get_value(coords, "w"), + get_value(coords, "h"))); + + VCL::Image::Format format = + img.get_image_format() != VCL::Image::Format::TDB ? 
+ img.get_image_format() : VCL::Image::Format::PNG; + + if (cmd.isMember("format")) { + std::string requested_format = + get_value(cmd, "format"); + + if (requested_format == "png") { + format = VCL::Image::Format::PNG; + } + else if(requested_format == "jpg") { + format = VCL::Image::Format::JPG; + } + } + + std::vector roi_enc; + roi_enc = img.get_encoded_image(format); + + if (!roi_enc.empty()) { + std::string* img_str = query_res.add_blobs(); + img_str->resize(roi_enc.size()); + std::memcpy((void*)img_str->data(), + (void*)roi_enc.data(), + roi_enc.size()); + } + else { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Image Data not found"; + error(return_error); + } + } catch (VCL::Exception e) { + print_exception(e); + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "VCL Exception"; + error(return_error); + } + + if (!flag_empty) { + findImage.removeMember("entities"); + } + } + } + + findBB["entities"].append(bb); + } + + findBB["status"] = RSCommand::Success; + ret[_cmd_name] = findBB; + + return ret; +} diff --git a/src/BoundingBoxCommand.h b/src/BoundingBoxCommand.h new file mode 100644 index 00000000..140e77fa --- /dev/null +++ b/src/BoundingBoxCommand.h @@ -0,0 +1,85 @@ +/** + * @file BoundingBoxCommand.h + * + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all 
copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#pragma once +#include +#include +#include +#include "VCL.h" + +#include "RSCommand.h" +#include "ExceptionsCommand.h" + +namespace VDMS { + + class AddBoundingBox : public RSCommand + { + public: + AddBoundingBox(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + }; + + class UpdateBoundingBox : public RSCommand + { + public: + UpdateBoundingBox(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + }; + + class FindBoundingBox : public RSCommand + { + public: + FindBoundingBox(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + Json::Value construct_responses( + Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); + }; + +}; // namespace VDMS diff --git a/src/DescriptorsCommand.cc b/src/DescriptorsCommand.cc new file mode 100644 index 00000000..aca5c1ed --- /dev/null +++ b/src/DescriptorsCommand.cc @@ -0,0 +1,890 @@ +/** + * @file DescriptorsCommand.cc + * + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files 
(the "Software"), + * to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#include + +#include "VDMSConfig.h" +#include "DescriptorsCommand.h" +#include "ExceptionsCommand.h" +#include "defines.h" + +using namespace VDMS; + +DescriptorsCommand::DescriptorsCommand(const std::string& cmd_name) : + RSCommand(cmd_name) +{ + _dm = DescriptorsManager::instance(); +} + +// This function only throws when there is a transaction error, +// but not when there is an input error (such as wrong set_name). +// In case of wrong input, we need to inform to the user what +// went wrong. 
+std::string DescriptorsCommand::get_set_path(PMGDQuery &query_tx, + const std::string& set_name, + int& dim) +{ + // Will issue a read-only transaction to check + // if the Set exists + PMGDQuery query(query_tx.get_pmgd_qh()); + + Json::Value constraints, link; + Json::Value name_arr; + name_arr.append("=="); + name_arr.append(set_name); + constraints[VDMS_DESC_SET_NAME_PROP] = name_arr; + + Json::Value results; + Json::Value list_arr; + list_arr.append(VDMS_DESC_SET_PATH_PROP); + list_arr.append(VDMS_DESC_SET_DIM_PROP); + results["list"] = list_arr; + + bool unique = true; + + // Query set node + query.add_group(); + query.QueryNode(-1, VDMS_DESC_SET_TAG, link, constraints, results, unique); + + Json::Value& query_responses = query.run(); + + if(query_responses.size() != 1 && query_responses[0].size() != 1) { + throw ExceptionCommand(DescriptorSetError, "PMGD Transaction Error"); + } + + Json::Value& set_json = query_responses[0][0]; + + if(!set_json.isMember("entities")) { + throw ExceptionCommand(DescriptorSetError, "PMGD Transaction Error"); + } + + for (auto& ent : set_json["entities"]) { + assert(ent.isMember(VDMS_DESC_SET_PATH_PROP)); + std::string set_path = ent[VDMS_DESC_SET_PATH_PROP].asString(); + dim = ent[VDMS_DESC_SET_DIM_PROP].asInt(); + return set_path; + } + + return ""; +} + +bool DescriptorsCommand::check_blob_size(const std::string& blob, const int dimensions, const long n_desc) +{ + return (blob.size() / sizeof(float) / dimensions == n_desc); +} + +// AddDescriptorSet Methods + +AddDescriptorSet::AddDescriptorSet() : + DescriptorsCommand("AddDescriptorSet") +{ + _storage_sets = VDMSConfig::instance()->get_path_descriptors(); +} + +int AddDescriptorSet::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value &cmd = jsoncmd[_cmd_name]; + + // Create PMGD cmd for AddNode + int node_ref = get_value(cmd, "_ref", + query.get_available_reference()); + 
+ std::string set_name = cmd["name"].asString(); + std::string desc_set_path = _storage_sets + "/" + set_name; + + Json::Value props = get_value(cmd, "properties"); + props[VDMS_DESC_SET_NAME_PROP] = cmd["name"].asString(); + props[VDMS_DESC_SET_DIM_PROP] = cmd["dimensions"].asInt(); + props[VDMS_DESC_SET_PATH_PROP] = desc_set_path; + + Json::Value constraints; + constraints[VDMS_DESC_SET_NAME_PROP].append("=="); + constraints[VDMS_DESC_SET_NAME_PROP].append(cmd["name"].asString()); + + query.AddNode(node_ref, VDMS_DESC_SET_TAG, props, constraints); + + if (cmd.isMember("link")) { + add_link(query, cmd["link"], node_ref, VDMS_DESC_SET_EDGE_TAG); + } + + return 0; +} + +Json::Value AddDescriptorSet::construct_responses( + Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &query_res, + const std::string &blob) +{ + const Json::Value &cmd = json[_cmd_name]; + Json::Value resp = check_responses(json_responses); + + Json::Value ret; + + auto error = [&](Json::Value& res) + { + ret[_cmd_name] = res; + return ret; + }; + + if (resp["status"] != RSCommand::Success) { + error(resp); + } + + int dimensions = cmd["dimensions"].asInt(); + std::string set_name = cmd["name"].asString(); + std::string desc_set_path = _storage_sets + "/" + set_name; + + std::string metric_str = get_value(cmd, "metric", "L2"); + VCL::DistanceMetric metric = metric_str == "L2"? VCL::L2 : VCL::IP; + + // For now, we use the default faiss index. + std::string eng_str = get_value(cmd, "engine", "FaissFlat"); + VCL::DescriptorSetEngine eng; + + if (eng_str == "FaissFlat") + eng = VCL::FaissFlat; + else if (eng_str == "FaissIVFFlat") + eng = VCL::FaissIVFFlat; + else if (eng_str == "TileDBDense") + eng = VCL::TileDBDense; + else if (eng_str == "TileDBSparse") + eng = VCL::TileDBSparse; + else + throw ExceptionCommand(DescriptorSetError, "Engine not supported"); + + // We can probably set up a mechanism + // to fix a broken link when detected later, same with images. 
+ try { + VCL::DescriptorSet desc_set(desc_set_path, dimensions, eng, metric); + desc_set.store(); + } + catch (VCL::Exception e) { + print_exception(e); + resp["status"] = RSCommand::Error; + resp["info"] = std::string("VCL Exception: ") + e.msg; + return error(resp); + } + + resp.clear(); + resp["status"] = RSCommand::Success; + + ret[_cmd_name] = resp; + return ret; +} + +// AddDescriptor Methods + +AddDescriptor::AddDescriptor() : + DescriptorsCommand("AddDescriptor") +{ +} + +long AddDescriptor::insert_descriptor(const std::string& blob, + const std::string& set_path, + int dim, + const std::string& label, + Json::Value& error) +{ + long id_first; + + try { + + VCL::DescriptorSet* desc_set = _dm->get_descriptors_handler(set_path); + + if (blob.length()/4 != dim) { + std::cerr << "AddDescriptor::insert_descriptor: "; + std::cerr << "Dimensions mismatch: "; + std::cerr << blob.length()/4 << " " << dim << std::endl; + error["info"] = "Blob Dimensions Mismatch"; + return -1; + } + + if (!label.empty()) { + long label_id = desc_set->get_label_id(label); + long* label_ptr = &label_id; + id_first = desc_set->add((float*)blob.data(), 1, label_ptr); + } + else { + id_first = desc_set->add((float*)blob.data(), 1); + } + + } catch (VCL::Exception e) { + print_exception(e); + error["info"] = "VCL Descriptors Exception"; + return -1; + } + + return id_first; +} + +int AddDescriptor::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value &cmd = jsoncmd[_cmd_name]; + + const std::string set_name = cmd["set"].asString(); + + Json::Value props = get_value(cmd, "properties"); + + std::string label; + if (cmd.isMember("label")) { + label = cmd["label"].asString(); + props[VDMS_DESC_LABEL_PROP] = label; + } + + int dimensions; + std::string set_path = get_set_path(query, set_name, dimensions); + + if (set_path.empty()) { + error["info"] = "Set " + set_name + " not found"; + 
error["status"] = RSCommand::Error; + return -1; + } + + long id = insert_descriptor(blob, set_path, dimensions, label, error); + + if (id < 0) { + error["status"] = RSCommand::Error; + return -1; + } + + props[VDMS_DESC_ID_PROP] = Json::Int64(id); + + int node_ref = get_value(cmd, "_ref", + query.get_available_reference()); + + query.AddNode(node_ref, VDMS_DESC_TAG, props, Json::nullValue); + + // It passed the checker, so it exists. + int set_ref = query.get_available_reference(); + + Json::Value link; + Json::Value results; + Json::Value list_arr; + list_arr.append(VDMS_DESC_SET_PATH_PROP); + list_arr.append(VDMS_DESC_SET_DIM_PROP); + results["list"] = list_arr; + + Json::Value constraints; + Json::Value name_arr; + name_arr.append("=="); + name_arr.append(set_name); + constraints[VDMS_DESC_SET_NAME_PROP] = name_arr; + + bool unique = true; + + // Query set node + query.QueryNode(set_ref, VDMS_DESC_SET_TAG, link, constraints, results, unique); + + if (cmd.isMember("link")) { + add_link(query, cmd["link"], node_ref, VDMS_DESC_EDGE_TAG); + } + + Json::Value props_edge; + query.AddEdge(-1, set_ref, node_ref, VDMS_DESC_SET_EDGE_TAG, props_edge); + + return 0; +} + +Json::Value AddDescriptor::construct_responses( + Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &query_res, + const std::string &blob) +{ + Json::Value resp = check_responses(json_responses); + + Json::Value ret; + ret[_cmd_name] = resp; + return ret; +} + +// ClassifyDescriptors Methods + +ClassifyDescriptor::ClassifyDescriptor() : + DescriptorsCommand("ClassifyDescriptor") +{ +} + +int ClassifyDescriptor::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value &cmd = jsoncmd[_cmd_name]; + + const std::string set_name = cmd["set"].asString(); + + int dimensions; + const std::string set_path = get_set_path(query, set_name, dimensions); + + if (set_path.empty()) { + 
error["status"] = RSCommand::Error; + error["info"] = "DescritorSet Not Found!"; + return -1; + } + + Json::Value link; + Json::Value results; + Json::Value list_arr; + list_arr.append(VDMS_DESC_SET_PATH_PROP); + list_arr.append(VDMS_DESC_SET_DIM_PROP); + results["list"] = list_arr; + + Json::Value constraints; + Json::Value name_arr; + name_arr.append("=="); + name_arr.append(set_name); + constraints[VDMS_DESC_SET_NAME_PROP] = name_arr; + + bool unique = true; + + // Query set node + query.QueryNode( + get_value(cmd, "_ref", -1), + VDMS_DESC_SET_TAG, + link, constraints, results, unique); + + return 0; +} + +Json::Value ClassifyDescriptor::construct_responses( + Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &query_res, + const std::string &blob) +{ + Json::Value classifyDesc; + const Json::Value &cmd = json[_cmd_name]; + + Json::Value ret; + + bool flag_error = false; + + if (json_responses.size() == 0) { + classifyDesc["status"] = RSCommand::Error; + classifyDesc["info"] = "Not Found!"; + flag_error = true; + ret[_cmd_name] = classifyDesc; + return ret; + } + + for (auto res : json_responses) { + + if (res["status"] != 0) { + flag_error = true; + break; + } + + if (!res.isMember("entities")) + continue; + + classifyDesc = res; + + for (auto& ent : classifyDesc["entities"]) { + + assert(ent.isMember(VDMS_DESC_SET_PATH_PROP)); + std::string set_path = ent[VDMS_DESC_SET_PATH_PROP].asString(); + try { + VCL::DescriptorSet* set = _dm->get_descriptors_handler(set_path); + + auto labels = set->classify((float*)blob.data(), 1); + + if (labels.size() == 0) { + classifyDesc["info"] = "No labels, cannot classify"; + classifyDesc["status"] = RSCommand::Error; + } + else { + classifyDesc["label"] = (set->label_id_to_string(labels)).at(0); + } + } catch (VCL::Exception e) { + print_exception(e); + classifyDesc["status"] = RSCommand::Error; + classifyDesc["info"] = "VCL Exception"; + flag_error = true; + break; + } + } + } + + if 
(!flag_error) { + classifyDesc["status"] = RSCommand::Success; + } + + classifyDesc.removeMember("entities"); + + ret[_cmd_name] = classifyDesc; + + return ret; +} + +// FindDescriptors Methods + +FindDescriptor::FindDescriptor() : + DescriptorsCommand("FindDescriptor") +{ +} + +bool FindDescriptor::need_blob(const Json::Value& cmd) +{ + return cmd[_cmd_name].isMember("k_neighbors"); +} + +int FindDescriptor::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& cp_result) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + const std::string set_name = cmd["set"].asString(); + + int dimensions; + const std::string set_path = get_set_path(query, set_name, dimensions); + + if (set_path.empty()) { + cp_result["status"] = RSCommand::Error; + cp_result["info"] = "DescritorSet Not Found!"; + return -1; + } + + Json::Value results_set; + Json::Value list_arr_set; + list_arr_set.append(VDMS_DESC_SET_PATH_PROP); + list_arr_set.append(VDMS_DESC_SET_DIM_PROP); + results_set["list"] = list_arr_set; + + Json::Value constraints_set; + Json::Value name_arr; + name_arr.append("=="); + name_arr.append(set_name); + constraints_set[VDMS_DESC_SET_NAME_PROP] = name_arr; + + bool unique = true; + + Json::Value constraints = cmd["constraints"]; + if (constraints.isMember("_label")) { + constraints[VDMS_DESC_LABEL_PROP] = constraints["_label"]; + constraints.removeMember("_label"); + } + if (constraints.isMember("_id")) { + constraints[VDMS_DESC_ID_PROP] = constraints["_id"]; + constraints.removeMember("_id"); + } + + Json::Value results = cmd["results"]; + + // Add label/id as required. 
+ // Remove the variables with "_" + if (results.isMember("list")) { + + results["list"].append(VDMS_DESC_LABEL_PROP); + results["list"].append(VDMS_DESC_ID_PROP); + + int pos = -1; + for (int i = 0; i < results["list"].size(); ++i) { + if (results["list"][i].asString() == "_label" || + results["list"][i].asString() == "_id" || + results["list"][i].asString() == "_distance" ) { + pos = i; + Json::Value aux; + results["list"].removeIndex(i, &aux); + --i; + } + } + } + + // Case (1) + if (cmd.isMember("link")) { + + // Query for the Descriptors related to user-defined link + // that match the user-defined constraints + // We will need to the the AND operation + // on the construct_response. + + int desc_ref = get_value(cmd, "_ref", + query.get_available_reference()); + + query.QueryNode( + desc_ref, + VDMS_DESC_TAG, + cmd["link"], constraints, results, false); + + Json::Value link_to_desc; + link_to_desc["ref"] = desc_ref; + + // Query for the set + query.QueryNode( + -1, + VDMS_DESC_SET_TAG, + link_to_desc, constraints_set, results_set, unique); + } + // Case (2) + else if (!cmd.isMember("k_neighbors")) { + + // In this case, we either need properties of the descriptor + // ("list") on the results block, or we need the descriptor nodes + // because the user defined a reference. 
+ + int ref_set = query.get_available_reference(); + + Json::Value link_set; // null + + // Query for the set + query.QueryNode( + ref_set, + VDMS_DESC_SET_TAG, + link_set, constraints_set, results_set, unique); + + Json::Value link_to_set; + link_to_set["ref"] = ref_set; + + // Query for the Descriptors related to that set + // that match the user-defined constraints + query.QueryNode( + get_value(cmd, "_ref", -1), + VDMS_DESC_TAG, + link_to_set, constraints, results, false); + } + // Case (3), Just want the descriptor by value, we only need the set + else { + Json::Value link_null; // null + + const int k_neighbors = get_value(cmd, "k_neighbors", 0); + + int ref_set = query.get_available_reference(); + + // Query for the set and detect if exist during transaction. + query.QueryNode( + ref_set, + VDMS_DESC_SET_TAG, + Json::nullValue, constraints_set, results_set, true); + + Json::Value link_to_set; + link_to_set["ref"] = ref_set; + + if (!check_blob_size(blob, dimensions, 1)) { + cp_result["status"] = RSCommand::Error; + cp_result["info"] = "Blob (required) is null or size invalid"; + return -1; + } + + try { + VCL::DescriptorSet* set = _dm->get_descriptors_handler(set_path); + + // This is a way to pass state to the construct_response + // We just pass the cache_object_id. 
+ auto cache_obj_id = VCL::get_uint64(); + cp_result["cache_obj_id"] = Json::Int64(cache_obj_id); + + _cache_map[cache_obj_id] = new IDDistancePair(); + + IDDistancePair* pair = _cache_map[cache_obj_id]; + std::vector& ids = pair->first; + std::vector& distances = pair->second; + + set->search((float*)blob.data(), 1, k_neighbors, ids, distances); + + long returned_counter = 0; + std::string blob_return; + + Json::Value ids_array; + + for (int i = 0; i < ids.size(); ++i) { + if (ids[i] >= 0) { + ids_array.append(Json::Int64(ids[i])); + } + else { + ids.erase(ids.begin() + i, ids.end()); + distances.erase(distances.begin() + i, distances.end()); + break; + } + } + + // This are needed to construct the response. + if (!results.isMember("list")) { + results["list"].append(VDMS_DESC_LABEL_PROP); + results["list"].append(VDMS_DESC_ID_PROP); + } + + Json::Value node_constraints = constraints; + node_constraints[VDMS_DESC_ID_PROP].append("=="); + node_constraints[VDMS_DESC_ID_PROP].append(ids_array); + + query.QueryNode( + get_value(cmd, "_ref", -1), + VDMS_DESC_TAG, + link_to_set, node_constraints, results, false); + + } catch (VCL::Exception e) { + print_exception(e); + cp_result["status"] = RSCommand::Error; + cp_result["info"] = "VCL Exception"; + return -1; + } + } + + return 0; +} + +void FindDescriptor::convert_properties(Json::Value& entities) +{ + for (auto& element : entities) { + + if (element.isMember(VDMS_DESC_LABEL_PROP)) { + element["_label"] = element[VDMS_DESC_LABEL_PROP]; + element.removeMember(VDMS_DESC_LABEL_PROP); + } + if (element.isMember(VDMS_DESC_ID_PROP)) { + element["_id"] = element[VDMS_DESC_ID_PROP]; + element.removeMember(VDMS_DESC_ID_PROP); + } + } +} + +Json::Value FindDescriptor::construct_responses( + Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &query_res, + const std::string &blob) +{ + Json::Value findDesc; + const Json::Value &cmd = json[_cmd_name]; + const Json::Value &cache = json["cp_result"]; + 
+ Json::Value ret; + + bool flag_error = false; + + auto error = [&](Json::Value& res) + { + ret[_cmd_name] = res; + return ret; + }; + + if (json_responses.size() == 0) { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Not Found!"; + return error(return_error); + } + + const Json::Value& results = cmd["results"]; + + // Case (1) + if (cmd.isMember("link")) { + + assert(json_responses.size() == 2); + + findDesc = json_responses[0]; + + if (findDesc["status"] != 0) { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Descriptors Not Found"; + return error(return_error); + } + } + // Case (2) + else if (!cmd.isMember("k_neighbors")) { + + assert(json_responses.size() == 2); + + findDesc = json_responses[1]; + + if (findDesc.isMember("entities")) { + convert_properties(findDesc["entities"]); + } + + if (findDesc["status"] != 0) { + std::cerr << json_responses.toStyledString() << std::endl; + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Descriptors Not Found"; + return error(return_error); + } + } + // Case (3) + else{ + + assert(json_responses.size() == 2); + + // Get Set info. 
+ const Json::Value& set_response = json_responses[0]; + + if (set_response["status"] != 0 || + !set_response.isMember("entities")) { + + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Set Not Found"; + return error(return_error); + } + + assert(set_response["entities"].size() == 1); + + const Json::Value& set = set_response["entities"][0]; + + // This properties should always exist + assert(set.isMember(VDMS_DESC_SET_PATH_PROP)); + assert(set.isMember(VDMS_DESC_SET_DIM_PROP)); + std::string set_path = set[VDMS_DESC_SET_PATH_PROP].asString(); + int dim = set[VDMS_DESC_SET_DIM_PROP].asInt(); + + if (!check_blob_size(blob, dim, 1)) { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Blob (required) is null or size invalid"; + return error(return_error); + } + + std::vector* ids; + std::vector* distances; + + bool compute_distance = false; + if (results.isMember("list")) { + + for (int i = 0; i < results["list"].size(); ++i) { + if (results["list"][i].asString() == "_distance") { + compute_distance = true; + break; + } + + } + } + + // Test whether there is any cached result. 
+ assert(cache.isMember("cache_obj_id")); + + long cache_obj_id = cache["cache_obj_id"].asInt64(); + + // Get from Cache + IDDistancePair* pair = _cache_map[cache_obj_id]; + ids = &(pair->first); + distances = &(pair->second); + + findDesc = json_responses[1]; + + if (findDesc["status"] != 0 || !findDesc.isMember("entities")) { + + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Descriptor Not Found in graph!"; + return error(return_error); + } + + Json::Value entities = findDesc["entities"]; + findDesc.removeMember("entities"); + + convert_properties(entities); + + for (int i = 0; i < (*ids).size(); ++i) { + + Json::Value desc_data; + + long d_id = (*ids)[i]; + bool pass_constraints = false; + + for (auto ent : entities) { + if (ent["_id"].asInt64() == d_id) { + desc_data = ent; + pass_constraints = true; + break; + } + } + + if (!pass_constraints) + continue; + + if (compute_distance) { + + desc_data["_distance"] = (*distances)[i]; + + // Should be already sorted, + // but if not, we need to match the id with + // whatever is on the cache + // desc_data["cache_id"] = Json::Int64((*ids)[i]); + } + + if (results.isMember("blob")) { + + desc_data["blob"] = true; + + try { + VCL::DescriptorSet* set = + _dm->get_descriptors_handler(set_path); + + std::string* desc_blob = query_res.add_blobs(); + desc_blob->resize(sizeof(float) * dim); + + set->get_descriptors(&(*ids)[i], 1, + (float*)(*desc_blob).data()); + + } catch (VCL::Exception e) { + print_exception(e); + findDesc["status"] = RSCommand::Error; + findDesc["info"] = "VCL Exception"; + return error(findDesc); + } + + } + + findDesc["entities"].append(desc_data); + } + + if (cache.isMember("cache_obj_id")) { + // We remove the vectors associated with that entry to + // free memory, without removing the entry from _cache_map + // because tbb does not have a lock free way to do this. 
+ IDDistancePair* pair = _cache_map[cache["cache_obj_id"].asInt64()]; + delete pair; + } + } + + findDesc["status"] = RSCommand::Success; + ret[_cmd_name] = findDesc; + + return ret; +} diff --git a/src/DescriptorsCommand.h b/src/DescriptorsCommand.h new file mode 100644 index 00000000..84811447 --- /dev/null +++ b/src/DescriptorsCommand.h @@ -0,0 +1,177 @@ +/** + * @file DescriptorsCommand.h + * + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#pragma once +#include +#include +#include +#include + +#include +#include + +#include "QueryHandler.h" // to provide the database connection +#include "DescriptorsManager.h" +#include "tbb/concurrent_unordered_map.h" + +namespace VDMS{ + + typedef std::pair, std::vector> IDDistancePair; + + // This class encapsulates common behavior of Descriptors-related cmds. 
+ class DescriptorsCommand : public RSCommand + { + protected: + DescriptorsManager* _dm; + + // IDDistancePair is a pointer so that we can free its content + // without having to use erase methods, which are not lock free + // for this data structure in tbb + tbb::concurrent_unordered_map _cache_map; + + // Will return the path to the set and the dimensions + std::string get_set_path(PMGDQuery& query_tx, + const std::string& set, int& dim); + + bool check_blob_size(const std::string& blob, const int dimensions, + const long n_desc); + + public: + DescriptorsCommand(const std::string& cmd_name); + + virtual bool need_blob(const Json::Value& cmd) { return false; } + + virtual int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error) = 0; + + virtual Json::Value construct_responses( + Json::Value& json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob) = 0; + }; + + class AddDescriptorSet: public DescriptorsCommand + { + std::string _storage_sets; + + public: + AddDescriptorSet(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + Json::Value construct_responses( + Json::Value& json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); + }; + + class AddDescriptor: public DescriptorsCommand + { + long insert_descriptor(const std::string& blob, + const std::string& path, + int dim, + const std::string& label, + Json::Value& error); + + public: + AddDescriptor(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + bool need_blob(const Json::Value& cmd) { return true; } + + Json::Value construct_responses( + Json::Value& json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); + }; + + 
class ClassifyDescriptor: public DescriptorsCommand + { + + public: + ClassifyDescriptor(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + bool need_blob(const Json::Value& cmd) { return true; } + + Json::Value construct_responses( + Json::Value& json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); + + }; + + class FindDescriptor: public DescriptorsCommand + { + + private: + void convert_properties(Json::Value& entities); + + public: + FindDescriptor(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + bool need_blob(const Json::Value& cmd); + + Json::Value construct_responses( + Json::Value& json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); + + }; + } diff --git a/src/DescriptorsManager.cc b/src/DescriptorsManager.cc new file mode 100644 index 00000000..9c7d0a77 --- /dev/null +++ b/src/DescriptorsManager.cc @@ -0,0 +1,85 @@ +/** + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#include +#include "DescriptorsManager.h" + +using namespace VDMS; + +DescriptorsManager* DescriptorsManager::_dm; + +bool DescriptorsManager::init() +{ + if(_dm) + return false; + + _dm = new DescriptorsManager(); + return true; +} + +DescriptorsManager* DescriptorsManager::instance() +{ + if(_dm) + return _dm; + + std::cerr << "ERROR: DescriptorsManager not init" << std::endl; + return NULL; +} + +DescriptorsManager::DescriptorsManager() +{ +} + +void DescriptorsManager::flush() +{ + for (auto desc_set : _descriptors_handlers) { + desc_set.second->store(); + delete desc_set.second; + } + _descriptors_handlers.clear(); +} + +VCL::DescriptorSet* DescriptorsManager::get_descriptors_handler( + std::string path) +{ + VCL::DescriptorSet* desc_ptr; + + auto element = _descriptors_handlers.find(path); + + if (element == _descriptors_handlers.end()) { + desc_ptr = new VCL::DescriptorSet(path); + _descriptors_handlers[path] = desc_ptr; + } + else { + desc_ptr = element->second; + } + + return desc_ptr; +} + diff --git a/src/DescriptorsManager.h b/src/DescriptorsManager.h new file mode 100644 index 00000000..7b22b547 --- /dev/null +++ b/src/DescriptorsManager.h @@ -0,0 +1,64 @@ +/** + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), + * to deal in the Software without 
restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#pragma once + +#include +#include +#include +#include + +#include "VCL.h" +#include "tbb/concurrent_unordered_map.h" + +namespace VDMS { + + class DescriptorsManager + { + static DescriptorsManager* _dm; + tbb::concurrent_unordered_map + _descriptors_handlers; + + DescriptorsManager(); + + public: + + static bool init(); + static DescriptorsManager* instance(); + + /** + * Handles descriptors and lock for the descriptor + * This is a blocking call until the descriptor is free + * + * @param path Path to the descriptor set + */ + VCL::DescriptorSet* get_descriptors_handler(std::string path); + void flush(); + }; +}; diff --git a/src/ExceptionsCommand.h b/src/ExceptionsCommand.h index a5a68b50..f3d5fe44 100644 --- a/src/ExceptionsCommand.h +++ b/src/ExceptionsCommand.h @@ -43,6 +43,8 @@ namespace VDMS { DescriptorError, DescriptorSetError, PMGDTransactiontError, + LockTimeout, + LockError, Undefined = 100,// Any undefined error }; diff --git a/src/ImageCommand.cc b/src/ImageCommand.cc index eaaedf2e..bda9229e 100644 --- a/src/ImageCommand.cc +++ 
b/src/ImageCommand.cc @@ -33,13 +33,10 @@ #include "ImageCommand.h" #include "VDMSConfig.h" +#include "defines.h" using namespace VDMS; -#define VDMS_IM_TAG "AT:IMAGE" -#define VDMS_IM_PATH_PROP "imgPath" -#define VDMS_IM_EDGE "AT:IMG_LINK" - //========= AddImage definitions ========= ImageCommand::ImageCommand(const std::string &cmd_name): @@ -66,20 +63,26 @@ void ImageCommand::enqueue_operations(VCL::Image& img, const Json::Value& ops) get_value(op, "width"), get_value(op, "height") )); } + else if (type == "flip") { + img.flip(get_value(op, "code")); + } + else if (type == "rotate") { + img.rotate(get_value(op, "angle"), + get_value(op, "resize")); + } else { throw ExceptionCommand(ImageError, "Operation not defined"); } } } +//========= AddImage definitions ========= + AddImage::AddImage() : ImageCommand("AddImage") { - _storage_tdb = VDMSConfig::instance() - ->get_string_value("tdb_path", DEFAULT_TDB_PATH); - _storage_png = VDMSConfig::instance() - ->get_string_value("png_path", DEFAULT_PNG_PATH); - _storage_jpg = VDMSConfig::instance() - ->get_string_value("jpg_path", DEFAULT_JPG_PATH); + _storage_tdb = VDMSConfig::instance()->get_path_tdb(); + _storage_png = VDMSConfig::instance()->get_path_png(); + _storage_jpg = VDMSConfig::instance()->get_path_jpg(); } int AddImage::construct_protobuf(PMGDQuery& query, @@ -100,21 +103,21 @@ int AddImage::construct_protobuf(PMGDQuery& query, } std::string img_root = _storage_tdb; - VCL::ImageFormat vcl_format = VCL::TDB; + VCL::Image::Format vcl_format = VCL::Image::Format::TDB; + std::string format = get_value(cmd, "format", ""); if (cmd.isMember("format")) { - std::string format = get_value(cmd, "format"); if (format == "png") { - vcl_format = VCL::PNG; + vcl_format = VCL::Image::Format::PNG; img_root = _storage_png; } else if (format == "tdb") { - vcl_format = VCL::TDB; + vcl_format = VCL::Image::Format::TDB; img_root = _storage_tdb; } else if (format == "jpg") { - vcl_format = VCL::JPG; + vcl_format = 
VCL::Image::Format::JPG; img_root = _storage_jpg; } else { @@ -124,7 +127,7 @@ int AddImage::construct_protobuf(PMGDQuery& query, } } - std::string file_name = img.create_unique(img_root, vcl_format); + std::string file_name = VCL::create_unique(img_root, format); // Modifiyng the existing properties that the user gives // is a good option to make the AddNode more simple. @@ -142,12 +145,37 @@ int AddImage::construct_protobuf(PMGDQuery& query, error["image_added"] = file_name; if (cmd.isMember("link")) { - add_link(query, cmd["link"], node_ref, VDMS_IM_EDGE); + add_link(query, cmd["link"], node_ref, VDMS_IM_EDGE_TAG); } return 0; } +//========= UpdateImage definitions ========= + +UpdateImage::UpdateImage() : ImageCommand("UpdateImage") +{ +} + +int UpdateImage::construct_protobuf(PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + // Update Image node + query.UpdateNode(get_value(cmd, "_ref", -1), + VDMS_IM_TAG, + cmd["properties"], + cmd["remove_props"], + cmd["constraints"], + get_value(cmd, "unique", false)); + + return 0; +} + //========= FindImage definitions ========= FindImage::FindImage() : ImageCommand("FindImage") @@ -164,7 +192,11 @@ int FindImage::construct_protobuf( const Json::Value& cmd = jsoncmd[_cmd_name]; Json::Value results = get_value(cmd, "results"); - results["list"].append(VDMS_IM_PATH_PROP); + + // Unless otherwhis specified, we return the blob. 
+ if (get_value(results, "blob", true)){ + results["list"].append(VDMS_IM_PATH_PROP); + } query.QueryNode( get_value(cmd, "_ref", -1), @@ -181,7 +213,8 @@ int FindImage::construct_protobuf( Json::Value FindImage::construct_responses( Json::Value& responses, const Json::Value& json, - protobufs::queryMessage &query_res) + protobufs::queryMessage &query_res, + const std::string &blob) { const Json::Value& cmd = json[_cmd_name]; @@ -197,7 +230,7 @@ Json::Value FindImage::construct_responses( Json::Value return_error; return_error["status"] = RSCommand::Error; return_error["info"] = "PMGD Response Bad Size"; - error(return_error); + return error(return_error); } Json::Value& findImage = responses[0]; @@ -207,14 +240,16 @@ Json::Value FindImage::construct_responses( if (findImage["status"] != 0) { findImage["status"] = RSCommand::Error; // Uses PMGD info error. - error(findImage); + return error(findImage); } bool flag_empty = true; for (auto& ent : findImage["entities"]) { - assert(ent.isMember(VDMS_IM_PATH_PROP)); + if(!ent.isMember(VDMS_IM_PATH_PROP)){ + continue; + } std::string im_path = ent[VDMS_IM_PATH_PROP].asString(); ent.removeMember(VDMS_IM_PATH_PROP); @@ -232,24 +267,24 @@ Json::Value FindImage::construct_responses( // We will return the image in the format the user // request, or on its format in disk, except for the case // of .tdb, where we will encode as png. - VCL::ImageFormat format = img.get_image_format() != VCL::TDB ? - img.get_image_format() : VCL::PNG; + VCL::Image::Format format = img.get_image_format() != VCL::Image::Format::TDB ? 
+ img.get_image_format() : VCL::Image::Format::PNG; if (cmd.isMember("format")) { std::string requested_format = get_value(cmd, "format"); if (requested_format == "png") { - format = VCL::PNG; + format = VCL::Image::Format::PNG; } else if (requested_format == "jpg") { - format = VCL::JPG; + format = VCL::Image::Format::JPG; } else { Json::Value return_error; return_error["status"] = RSCommand::Error; return_error["info"] = "Invalid Format for FindImage"; - error(return_error); + return error(return_error); } } @@ -268,18 +303,18 @@ Json::Value FindImage::construct_responses( Json::Value return_error; return_error["status"] = RSCommand::Error; return_error["info"] = "Image Data not found"; - error(return_error); + return error(return_error); } } catch (VCL::Exception e) { print_exception(e); Json::Value return_error; return_error["status"] = RSCommand::Error; return_error["info"] = "VCL Exception"; - error(return_error); + return error(return_error); } } - if (!flag_empty) { + if (flag_empty) { findImage.removeMember("entities"); } diff --git a/src/ImageCommand.h b/src/ImageCommand.h index 8bfa070e..69ecdecb 100644 --- a/src/ImageCommand.h +++ b/src/ImageCommand.h @@ -58,15 +58,11 @@ namespace VDMS { int grp_id, Json::Value& error) = 0; - virtual bool need_blob() { return false; } + virtual bool need_blob(const Json::Value& cmd) { return false; } }; class AddImage: public ImageCommand { - const std::string DEFAULT_TDB_PATH = "images/tdb/database"; - const std::string DEFAULT_PNG_PATH = "images/png_database"; - const std::string DEFAULT_JPG_PATH = "images/jpg_database"; - std::string _storage_tdb; std::string _storage_png; std::string _storage_jpg; @@ -80,7 +76,22 @@ namespace VDMS { int grp_id, Json::Value& error); - bool need_blob() { return true; } + bool need_blob(const Json::Value& cmd) { return true; } + }; + + class UpdateImage: public ImageCommand + { + public: + UpdateImage(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const 
std::string& blob, + int grp_id, + Json::Value& error); + + // TODO In order to support "format" or "operations", we could + // implement VCL save operation by adding construct_responses method. }; class FindImage: public ImageCommand @@ -96,7 +107,8 @@ namespace VDMS { Json::Value construct_responses( Json::Value &json_responses, const Json::Value &json, - protobufs::queryMessage &response); + protobufs::queryMessage &response, + const std::string &blob); }; }; // namespace VDMS diff --git a/src/PMGDIterators.cc b/src/PMGDIterators.cc index c35fdb3c..9dc97b05 100644 --- a/src/PMGDIterators.cc +++ b/src/PMGDIterators.cc @@ -33,30 +33,24 @@ using namespace VDMS; -bool PMGDQueryHandler::ReusableNodeIterator::_next() +namespace VDMS { + +template <> +PMGDQueryHandler::ReusableIterator:: +ReusableIterator() : + _ti(NULL), + _it(_traversed.end()) { - if (_it != _traversed.end()) { - ++_it; - if (_it != _traversed.end()) - return true; - } - if (bool(_ni)) { - _it = _traversed.insert(_traversed.end(), &*_ni); - _ni.next(); - return true; - } - return false; } -// TODO It might be possible to avoid this if the first iterator -// was build out of an index sorted on the same key been sought here. -// Hopefully that won't happen. -void PMGDQueryHandler::ReusableNodeIterator::sort(PMGD::StringID sortkey) +template<> +void PMGDQueryHandler::ReusableIterator::add(PMGD::Edge *e) { - // First finish traversal - traverse_all(); - _traversed.sort(compare_propkey{sortkey}); - _it = _traversed.begin(); + // Easiest to add to the end of list. If we are in middle of + // traversal, then this edge might get skipped. 
Use this function + // with that understanding *** + _traversed.insert(_traversed.end(), e); +} } bool PMGDQueryHandler::MultiNeighborIteratorImpl::_next() @@ -89,3 +83,46 @@ bool PMGDQueryHandler::MultiNeighborIteratorImpl::next() return _next(); } +bool PMGDQueryHandler::NodeEdgeIteratorImpl::next() +{ + _edge_it->next(); + while (_edge_it != NULL && bool(*_edge_it)) { + if (check_predicates()) + return true; + _edge_it->next(); + } + return _next(); +} + +bool PMGDQueryHandler::NodeEdgeIteratorImpl::_next() +{ + while (_src_ni != NULL && bool(*_src_ni)) { + delete _edge_it; + _src_ni->next(); + if (bool(*_src_ni)) { + _edge_it = new PMGD::EdgeIterator((*_src_ni)->get_edges(_dir, _expr.tag())); + while (_edge_it != NULL && bool(*_edge_it)) { + if (check_predicates()) + return true; + _edge_it->next(); + } + } + else + break; + } + return false; +} + +bool PMGDQueryHandler::NodeEdgeIteratorImpl::check_predicates() +{ + PMGD::Edge *e = get_edge(); + for (std::size_t i = _pred_start; i < _num_predicates; i++) { + PMGD::PropertyFilter pf(_expr.predicate(i)); + if (pf(*e) == PMGD::DontPass) + return false; + } + if (_check_dest && + _dest_nodes.find(&(e->get_destination()) ) == _dest_nodes.end()) + return false; + return true; +} diff --git a/src/PMGDIterators.h b/src/PMGDIterators.h index 5d2cbe8a..79fcf76b 100644 --- a/src/PMGDIterators.h +++ b/src/PMGDIterators.h @@ -31,26 +31,44 @@ #pragma once +#include + #include "pmgd.h" #include "PMGDQueryHandler.h" #include "SearchExpression.h" namespace VDMS { - class PMGDQueryHandler::ReusableNodeIterator + + template + class PMGDQueryHandler::ReusableIterator { // Iterator for the starting nodes. - PMGD::NodeIterator _ni; + Ti _ti; // Type Iterator - // TODO Is list the best data structure if we could potentially - // sort? - std::list _traversed; + // TODO Is list the best data structure + // if we could potentially sort? 
+ typedef std::list base_container; + base_container _traversed; // Current postion of list iterator - std::list::iterator _it; + typedef typename base_container::iterator list_iterator; + list_iterator _it; - bool _next(); + bool _next() { + if (_it != _traversed.end()) { + ++_it; + if (_it != _traversed.end()) + return true; + } + if (bool(_ti)) { + _it = _traversed.insert(_traversed.end(), &static_cast(*_ti)); + _ti.next(); + return true; + } + return false; + } - PMGD::Node *ref() + T *ref() { if (!bool(*this)) throw PMGDException(NullIterator, "Null impl"); @@ -58,44 +76,80 @@ namespace VDMS { } // TODO Is this the best way to do this - struct compare_propkey + struct compare_propkey_ascending { PMGD::StringID _propid; - bool operator()(const PMGD::Node *n1, const PMGD::Node *n2) + bool operator()(const T *n1, const T *n2) { return n1->get_property(_propid) < n2->get_property(_propid); } }; + struct compare_propkey_descending + { + PMGD::StringID _propid; + bool operator()(const T *n1, const T *n2) + { return n1->get_property(_propid) > n2->get_property(_propid); } + }; + public: // Make sure this is not auto-declared. The move one won't be. 
- ReusableNodeIterator(const ReusableNodeIterator &) = delete; - ReusableNodeIterator(PMGD::NodeIterator ni) - : _ni(ni), + ReusableIterator(const ReusableIterator &) = delete; + ReusableIterator(Ti ti) + : _ti(ti), _it(_traversed.begin()) { _next(); } // Add this to clean up the NewNodeIterator requirement - ReusableNodeIterator(PMGD::Node *n) - : _ni(NULL), + ReusableIterator(T *n) + : _ti(NULL), _it(_traversed.insert(_traversed.end(), n)) {} + ReusableIterator(); + operator bool() const { return _it != _traversed.end(); } bool next() { return _next(); } - PMGD::Node &operator *() { return *ref(); } - PMGD::Node *operator ->() { return ref(); } + T &operator *() { return *ref(); } + T *operator ->() { return ref(); } void reset() { _it = _traversed.begin(); } void traverse_all() { - for( ; _ni; _ni.next()) - _traversed.insert(_traversed.end(), &*_ni); + for( ; _ti; _ti.next()) + _traversed.insert(_traversed.end(), &static_cast(*_ti)); } // Sort the list. Once the list is sorted, all operations // following that happen in a sorted manner. And this function // resets the iterator to the beginning. - void sort(PMGD::StringID sortkey); + void sort(PMGD::StringID sortkey, bool descending = false){ + // First finish traversal + traverse_all(); + if (descending) + _traversed.sort(compare_propkey_descending{sortkey}); + else + _traversed.sort(compare_propkey_ascending{sortkey}); + + _it = _traversed.begin(); + } + + // Allow adding of edges as we construct this iterator in add_edge + // call. 
This is different than add_node since once add_edge can + // cause multiple edges to be created depending on how many nodes + // matched the source/destination conditions + void add(T *t); }; + // Specialization for PMGDQueryHandler::ReusableIterator + + template <> + PMGDQueryHandler::ReusableIterator:: + ReusableIterator(); + + template<> + void PMGDQueryHandler::ReusableIterator:: + add(PMGD::Edge *e); + + // End of specialization for PMGDQueryHandler::ReusableIterator + class PMGDQueryHandler::MultiNeighborIteratorImpl : public PMGD::NodeIteratorImplIntf { @@ -132,4 +186,86 @@ namespace VDMS { PMGD::Node *ref() { return &**_neighb_i; } }; + + class PMGDQueryHandler::NodeEdgeIteratorImpl : public PMGD::EdgeIteratorImplIntf + { + /// Reference to expression to evaluate + const SearchExpression _expr; + const size_t _num_predicates; + + ReusableNodeIterator *_src_ni; + ReusableNodeIterator *_dest_ni; + + // In order to check if the other end of an edge is in the nodes + // covered by the dest_ni, it is best to store those nodes in an + // easily searchable data structure, which a list inside ReusableNodeIterator + // is not. Besides, it doesn't make sense to expose that list here. + std::unordered_set _dest_nodes; + + std::size_t _pred_start; + PMGD::Direction _dir; + bool _check_dest; + + PMGD::EdgeIterator *_edge_it; + + bool _next(); + bool check_predicates(); + + PMGD::EdgeIterator return_iterator() + { + _dir = PMGD::Direction::Outgoing; + if (_src_ni == NULL) { + if (_dest_ni == NULL) + _pred_start = 1; + else { + _dir = PMGD::Direction::Incoming; + _src_ni = _dest_ni; + _dest_ni = NULL; + } + } + + // !bool(*_src_ni) will never be empty because of how the code is + // right now, but we should change in the future because we want + // to continue with the transaction even if some querynode did not + // find anything. We leave it for now. 
+ if (_src_ni == NULL || !bool(*_src_ni)) { + PMGD::PropertyPredicate pp; + if (_num_predicates > 0) + pp = _expr.predicate(0); + return _expr.db().get_edges(_expr.tag(), pp); + } + else { + return (*_src_ni)->get_edges(_dir, _expr.tag()); + } + } + + public: + NodeEdgeIteratorImpl(const SearchExpression &expr, + ReusableNodeIterator *src_ni = NULL, + ReusableNodeIterator *dest_ni = NULL) + : _expr(expr), _num_predicates(_expr.num_predicates()), + _src_ni(src_ni), _dest_ni(dest_ni), + _pred_start(0), _check_dest(false), + _edge_it(new PMGD::EdgeIterator(return_iterator())) + { + if (_dest_ni != NULL) { + for (; bool(*_dest_ni); _dest_ni->next()) + _dest_nodes.insert(&(**_dest_ni)); + // This iterator will be reset outside + _dest_ni = NULL; + _check_dest = true; + } + if (!check_predicates()) + next(); + } + + operator bool() const { return bool(*_edge_it); } + + bool next(); + PMGD::EdgeRef *ref() { return &(**_edge_it); } + PMGD::StringID get_tag() const { return (*_edge_it)->get_tag(); } + PMGD::Node &get_source() const { return (*_edge_it)->get_source(); } + PMGD::Node &get_destination() const { return (*_edge_it)->get_destination(); } + PMGD::Edge *get_edge() const { return &static_cast(**_edge_it); } + }; } diff --git a/src/PMGDQuery.cc b/src/PMGDQuery.cc index 2ac6978b..a712bc1d 100644 --- a/src/PMGDQuery.cc +++ b/src/PMGDQuery.cc @@ -46,7 +46,8 @@ using namespace VDMS; #define REFERENCE_RANGE_START 20000 PMGDQuery::PMGDQuery(PMGDQueryHandler& pmgd_qh) : - _pmgd_qh(pmgd_qh), _current_ref(REFERENCE_RANGE_START) + _pmgd_qh(pmgd_qh), _current_ref(REFERENCE_RANGE_START), + _readonly(true) { _current_group_id = 0; //this command to start a new transaction @@ -77,7 +78,7 @@ Json::Value& PMGDQuery::run() // execute the queries using the PMGDQueryHandler object std::vector> _pmgd_responses; - _pmgd_responses = _pmgd_qh.process_queries(_cmds, _current_group_id + 1); + _pmgd_responses = _pmgd_qh.process_queries(_cmds, _current_group_id + 1, _readonly); if 
(_pmgd_responses.size() != _current_group_id + 1) { if (_pmgd_responses.size() == 1 && _pmgd_responses[0].size() == 1) { @@ -86,7 +87,7 @@ Json::Value& PMGDQuery::run() return _json_responses; } _json_responses["status"] = -1; - _json_responses["info"] = "PMGD Transacion Error"; + _json_responses["info"] = "PMGDQuery: PMGD Transacion Error"; return _json_responses; } @@ -115,6 +116,7 @@ void PMGDQuery::add_link(const Json::Value& link, PMGDQueryNode* qn) PMGD::protobufs::LinkInfo *qnl = qn->mutable_link(); qnl->set_start_identifier(link["ref"].asInt()); + qnl->set_dir(PMGD::protobufs::LinkInfo::Any); if (link.isMember("direction")) { const std::string& direction = link["direction"].asString(); @@ -123,12 +125,12 @@ void PMGDQuery::add_link(const Json::Value& link, PMGDQueryNode* qn) qnl->set_dir(PMGD::protobufs::LinkInfo::Outgoing); else if ( direction == "in") qnl->set_dir(PMGD::protobufs::LinkInfo::Incoming); - else - qnl->set_dir(PMGD::protobufs::LinkInfo::Any); } if (link.isMember("unique")) qnl->set_nb_unique(link["unique"].asBool()); + else + qnl->set_nb_unique(false); if (link.isMember("class")) qnl->set_e_tag(link["class"].asString()); @@ -257,7 +259,7 @@ Json::Value PMGDQuery::parse_response(PMGDCmdResponse* response) Json::Value list(Json::arrayValue); auto& mymap = response->prop_values(); - assert(mymap.size() > 0); + // assert(mymap.size() > 0); uint64_t count = response->op_int_value(); @@ -274,7 +276,10 @@ Json::Value PMGDQuery::parse_response(PMGDCmdResponse* response) // if count <= 0, we return an empty list (json array) ret["returned"] = (Json::UInt64) count; - ret["entities"] = list; + if (response->node_edge()) + ret["entities"] = list; + else + ret["connections"] = list; } else { return construct_error_response(response); @@ -322,7 +327,7 @@ Json::Value PMGDQuery::parse_response(PMGDCmdResponse* response) } void PMGDQuery::parse_query_constraints(const Json::Value& constraints, - PMGDQueryNode* qn) + PMGDQueryConstraints* qn) { for (auto it = 
constraints.begin(); it != constraints.end(); ++it) { @@ -333,16 +338,49 @@ void PMGDQuery::parse_query_constraints(const Json::Value& constraints, assert(predicate.isArray()); assert(predicate.size() == 2 || predicate.size() == 4); - PMGDPropPred* pp = qn->add_predicates(); - pp->set_key(key); //assign the property predicate key + if (predicate.size() == 2 && predicate[1].isArray()) { + + // This will make the entire query OR, + // not sure if it is right. + qn->set_p_op(PMGD::protobufs::Or); + + const std::string& pred1 = predicate[0].asString(); + + PMGDPropPred::Op op; + + if (pred1 == ">") + op = PMGDPropPred::Gt; + else if (pred1 == ">=") + op = PMGDPropPred::Ge; + else if (pred1 == "<") + op = PMGDPropPred::Lt; + else if (pred1 == "<=") + op = PMGDPropPred::Le; + else if (pred1 == "==") + op = PMGDPropPred::Eq; + else if (pred1 == "!=") + op = PMGDPropPred::Ne; + + for (auto& value : predicate[1]) { + + PMGDPropPred* pp = qn->add_predicates(); + pp->set_key(key); //assign the property predicate key + pp->set_op(op); + PMGDProp* p1 = pp->mutable_v1(); + set_property(p1, key, value); + } + + } + else if (predicate.size() == 2) { + + PMGDPropPred* pp = qn->add_predicates(); + pp->set_key(key); //assign the property predicate key - PMGDProp* p1 = pp->mutable_v1(); - set_property(p1, key, predicate[1]); + PMGDProp* p1 = pp->mutable_v1(); + set_property(p1, key, predicate[1]); - PMGDPropPred::Op op; - const std::string& pred1 = predicate[0].asString(); + const std::string& pred1 = predicate[0].asString(); - if (predicate.size() == 2) { if (pred1 == ">") pp->set_op(PMGDPropPred::Gt); else if (pred1 == ">=") @@ -357,6 +395,15 @@ void PMGDQuery::parse_query_constraints(const Json::Value& constraints, pp->set_op(PMGDPropPred::Ne); } else { + + PMGDPropPred* pp = qn->add_predicates(); + pp->set_key(key); //assign the property predicate key + + PMGDProp* p1 = pp->mutable_v1(); + set_property(p1, key, predicate[1]); + + const std::string& pred1 = predicate[0].asString(); 
+ PMGDProp* p2 = pp->mutable_v2(); set_property(p2, key, predicate[3]); @@ -375,7 +422,7 @@ void PMGDQuery::parse_query_constraints(const Json::Value& constraints, } } -void PMGDQuery::get_response_type(const Json::Value& res, PMGDQueryNode *qn) +void PMGDQuery::get_response_type(const Json::Value& res, PMGDQueryResultInfo *qn) { for (auto it = res.begin(); it != res.end(); it++) { std::string *r_key= qn->add_response_keys(); @@ -384,7 +431,7 @@ void PMGDQuery::get_response_type(const Json::Value& res, PMGDQueryNode *qn) } void PMGDQuery::parse_query_results(const Json::Value& results, - PMGDQueryNode *qn) + PMGDQueryResultInfo *qn) { for (auto it = results.begin(); it != results.end(); it++) { const std::string& key = it.key().asString(); @@ -403,7 +450,22 @@ void PMGDQuery::parse_query_results(const Json::Value& results, else if (key == "sort") { qn->set_sort(true); std::string *sort_key= qn->mutable_sort_key(); - *sort_key = (*it).asString(); + + if ((*it).isObject()) { + *sort_key = (*it)["key"].asString(); + if ((*it).isMember("order")) { + qn->set_descending((*it)["order"] == "descending" ? + true : false); + } + else { + // Default is False (i.e. result in ascending order) + qn->set_descending(false); + } + } + else { + *sort_key = (*it).asString(); + qn->set_descending(false); + } } else if (key == "limit") { int limit = (*it).asUInt(); @@ -421,6 +483,8 @@ void PMGDQuery::AddNode(int ref, const Json::Value& props, const Json::Value& constraints) { + _readonly = false; + PMGDCmd* cmdadd = new PMGDCmd(); cmdadd->set_cmd_id(PMGDCmd::AddNode); cmdadd->set_cmd_grp_id(_current_group_id); @@ -438,21 +502,65 @@ void PMGDQuery::AddNode(int ref, if(!constraints.isNull()) { PMGDQueryNode *qn = an->mutable_query_node(); qn->set_identifier(ref); // Use the same ref to cache if node exists. - qn->set_tag(tag); - qn->set_unique(true); - qn->set_p_op(PMGD::protobufs::And); - qn->set_r_type(PMGD::protobufs::NodeID); // Since PMGD returns ids. 
- parse_query_constraints(constraints, qn); + PMGDQueryConstraints *qc = qn->mutable_constraints(); + qc->set_tag(tag); + qc->set_unique(true); + qc->set_p_op(PMGD::protobufs::And); + parse_query_constraints(constraints, qc); + PMGDQueryResultInfo *qr = qn->mutable_results(); + qr->set_r_type(PMGD::protobufs::NodeID); // Since PMGD returns ids. } _cmds.push_back(cmdadd); } +void PMGDQuery::UpdateNode(int ref, + const std::string& tag, + const Json::Value& props, + const Json::Value& remove_props, + const Json::Value& constraints, + bool unique) +{ + _readonly = false; + + PMGDCmd* cmdupdate = new PMGDCmd(); + cmdupdate->set_cmd_id(PMGDCmd::UpdateNode); + cmdupdate->set_cmd_grp_id(_current_group_id); + PMGD::protobufs::UpdateNode *un = cmdupdate->mutable_update_node(); + un->set_identifier(ref); + + for (auto it = props.begin(); it != props.end(); ++it) { + PMGDProp* p = un->add_properties(); + set_property(p, it.key().asString(), *it); + } + + for (auto it = remove_props.begin(); it != remove_props.end(); it++) { + std::string *r_key= un->add_remove_props(); + *r_key = (*it).asString(); + } + + if(!constraints.isNull()) { + PMGDQueryNode *qn = un->mutable_query_node(); + qn->set_identifier(ref < 0 ? get_available_reference() : ref); + PMGDQueryConstraints *qc = qn->mutable_constraints(); + qc->set_tag(tag); + qc->set_unique(unique); + qc->set_p_op(PMGD::protobufs::And); + parse_query_constraints(constraints, qc); + PMGDQueryResultInfo *qr = qn->mutable_results(); + qr->set_r_type(PMGD::protobufs::NodeID); // Since PMGD returns ids. 
+ } + + _cmds.push_back(cmdupdate); +} + void PMGDQuery::AddEdge(int ident, int src, int dst, const std::string& tag, const Json::Value& props) { + _readonly = false; + PMGDCmd* cmdedge = new PMGDCmd(); cmdedge->set_cmd_grp_id(_current_group_id); cmdedge->set_cmd_id(PMGDCmd::AddEdge); @@ -472,6 +580,48 @@ void PMGDQuery::AddEdge(int ident, _cmds.push_back(cmdedge); } +void PMGDQuery::UpdateEdge(int ref, int src_ref, int dest_ref, + const std::string& tag, + const Json::Value& props, + const Json::Value& remove_props, + const Json::Value& constraints, + bool unique) +{ + _readonly = false; + + PMGDCmd* cmdupdate = new PMGDCmd(); + cmdupdate->set_cmd_id(PMGDCmd::UpdateEdge); + cmdupdate->set_cmd_grp_id(_current_group_id); + PMGD::protobufs::UpdateEdge *ue = cmdupdate->mutable_update_edge(); + ue->set_identifier(ref); + + for (auto it = props.begin(); it != props.end(); ++it) { + PMGDProp* p = ue->add_properties(); + set_property(p, it.key().asString(), *it); + } + + for (auto it = remove_props.begin(); it != remove_props.end(); it++) { + std::string *r_key= ue->add_remove_props(); + *r_key = (*it).asString(); + } + + if(!constraints.isNull()) { + PMGDQueryEdge *qe = ue->mutable_query_edge(); + qe->set_identifier(ref < 0 ? get_available_reference() : ref); + qe->set_src_node_id(src_ref); + qe->set_dest_node_id(dest_ref); + PMGDQueryConstraints *qc = qe->mutable_constraints(); + qc->set_tag(tag); + qc->set_unique(unique); + qc->set_p_op(PMGD::protobufs::And); + parse_query_constraints(constraints, qc); + PMGDQueryResultInfo *qr = qe->mutable_results(); + qr->set_r_type(PMGD::protobufs::EdgeID); // Since PMGD returns ids. 
+ } + + _cmds.push_back(cmdupdate); +} + void PMGDQuery::QueryNode(int ref, const std::string& tag, const Json::Value& link, @@ -486,19 +636,55 @@ void PMGDQuery::QueryNode(int ref, PMGDQueryNode *qn = cmdquery->mutable_query_node(); qn->set_identifier(ref); - qn->set_tag(tag); - qn->set_unique(unique); - if (!link.isNull()) + PMGDQueryConstraints *qc = qn->mutable_constraints(); + qc->set_tag(tag); + qc->set_unique(unique); + + if (!link.isNull()) { add_link(link, qn); + } + + // TODO: We always assume AND, we need to change that + qc->set_p_op(PMGD::protobufs::And); + if (!constraints.isNull()) + parse_query_constraints(constraints, qc); + + PMGDQueryResultInfo *qr = qn->mutable_results(); + if (!results.isNull()) + parse_query_results(results, qr); + + _cmds.push_back(cmdquery); +} + +void PMGDQuery::QueryEdge(int ref, int src_ref, int dest_ref, + const std::string& tag, + const Json::Value& constraints, + const Json::Value& results, + bool unique) +{ + PMGDCmd* cmdquery = new PMGDCmd(); + cmdquery->set_cmd_id(PMGDCmd::QueryEdge); + cmdquery->set_cmd_grp_id(_current_group_id); + + PMGDQueryEdge *qn = cmdquery->mutable_query_edge(); + + qn->set_identifier(ref); + qn->set_src_node_id(src_ref); + qn->set_dest_node_id(dest_ref); + + PMGDQueryConstraints *qc = qn->mutable_constraints(); + qc->set_tag(tag); + qc->set_unique(unique); // TODO: We always assume AND, we need to change that - qn->set_p_op(PMGD::protobufs::And); + qc->set_p_op(PMGD::protobufs::And); if (!constraints.isNull()) - parse_query_constraints(constraints, qn); + parse_query_constraints(constraints, qc); + PMGDQueryResultInfo *qr = qn->mutable_results(); if (!results.isNull()) - parse_query_results(results, qn); + parse_query_results(results, qr); _cmds.push_back(cmdquery); } diff --git a/src/PMGDQuery.h b/src/PMGDQuery.h index 37afc3d9..67b54f2a 100644 --- a/src/PMGDQuery.h +++ b/src/PMGDQuery.h @@ -49,6 +49,7 @@ namespace VDMS { unsigned _current_group_id; PMGDQueryHandler& _pmgd_qh; unsigned 
_current_ref; + bool _readonly; // Stays true unless some write cmd sets it to false. Json::Value _json_responses; @@ -56,12 +57,12 @@ namespace VDMS { const Json::Value& val); void add_link(const Json::Value& link, PMGDQueryNode* qn); void parse_query_constraints(const Json::Value& constraints, - PMGDQueryNode* qn); + PMGDQueryConstraints* qc); void parse_query_results(const Json::Value& result_type, - PMGDQueryNode* qn); + PMGDQueryResultInfo* qr); - void get_response_type(const Json::Value& res, PMGDQueryNode* qn); + void get_response_type(const Json::Value& res, PMGDQueryResultInfo* qn); Json::Value parse_response(PMGDCmdResponse* response); @@ -83,22 +84,44 @@ namespace VDMS { //This is a reference to avoid copies Json::Value& get_json_responses() {return _json_responses;} + PMGDQueryHandler& get_pmgd_qh() {return _pmgd_qh;} + // If constraints is not null, this becomes a conditional AddNode void AddNode(int ref, const std::string& tag, const Json::Value& props, const Json::Value& constraints); + void UpdateNode(int ref, + const std::string& tag, + const Json::Value& props, + const Json::Value& remove_props, + const Json::Value& constraints, + bool unique); + void AddEdge(int ident, int src, int dst, const std::string& tag, const Json::Value& props); + void UpdateEdge(int ref, int src_ref, int dest_ref, + const std::string& tag, + const Json::Value& props, + const Json::Value& remove_props, + const Json::Value& constraints, + bool unique); + void QueryNode(int ref, const std::string& tag, const Json::Value& link, const Json::Value& constraints, const Json::Value& results, bool unique = false); + + void QueryEdge(int ref, int src_ref, int dest_ref, + const std::string& tag, + const Json::Value& constraints, + const Json::Value& results, + bool unique = false); }; } diff --git a/src/PMGDQueryHandler.cc b/src/PMGDQueryHandler.cc index 368954be..d38589b8 100644 --- a/src/PMGDQueryHandler.cc +++ b/src/PMGDQueryHandler.cc @@ -42,30 +42,46 @@ using namespace PMGD; 
using namespace VDMS; PMGD::Graph *PMGDQueryHandler::_db; -std::mutex *PMGDQueryHandler::_dblock; void PMGDQueryHandler::init() { - std::string dbname = VDMSConfig::instance() - ->get_string_value("pmgd_path", "default_pmgd"); - + std::string dbname = VDMSConfig::instance()->get_path_pmgd(); + int nalloc = VDMSConfig::instance()-> + get_int_value(PARAM_PMGD_NUM_ALLOCATORS, DEFAULT_PMGD_NUM_ALLOCATORS); + + PMGD::Graph::Config config; + config.num_allocators = nalloc; + + // TODO: Include allocators timeouts params as parameters for VDMS. + // These parameters can be loaded every time VDMS is run. + // We need PMGD to support these as config params before we can do it here. + // Create a db - _db = new PMGD::Graph(dbname.c_str(), PMGD::Graph::Create); + _db = new PMGD::Graph(dbname.c_str(), PMGD::Graph::Create, &config); +} - // Create the query handler here assuming database is valid now. - _dblock = new std::mutex(); +void PMGDQueryHandler::destroy() +{ + if (_db) { + delete _db; + _db = NULL; + } } std::vector PMGDQueryHandler::process_queries(const PMGDCmds &cmds, - int num_groups) + int num_groups, bool readonly) { std::vector responses(num_groups); assert(_tx == NULL); - _dblock->lock(); + + // Assuming one query handler handles one TX at a time. + _readonly = readonly; + for (const auto cmd : cmds) { PMGDCmdResponse *response = new PMGDCmdResponse(); + response->set_node_edge(true); // most queries are node related if (process_query(cmd, response) < 0) { error_cleanup(responses, response); break; // Goto cleanup site. @@ -85,7 +101,6 @@ std::vector _tx = NULL; } - _dblock->unlock(); return responses; } @@ -121,9 +136,8 @@ int PMGDQueryHandler::process_query(const PMGDCmd *cmd, switch (code) { case PMGDCmd::TxBegin: { - - // TODO: Needs to distinguish transaction parameters like RO/RW - _tx = new Transaction(*_db, Transaction::ReadWrite); + int tx_options = _readonly ? 
Transaction::ReadOnly : Transaction::ReadWrite; + _tx = new Transaction(*_db, tx_options); set_response(response, protobufs::TX, PMGDCmdResponse::Success); break; } @@ -149,6 +163,15 @@ int PMGDQueryHandler::process_query(const PMGDCmd *cmd, case PMGDCmd::QueryNode: retval = query_node(cmd->query_node(), response); break; + case PMGDCmd::QueryEdge: + retval = query_edge(cmd->query_edge(), response); + break; + case PMGDCmd::UpdateNode: + update_node(cmd->update_node(), response); + break; + case PMGDCmd::UpdateEdge: + update_edge(cmd->update_edge(), response); + break; } } catch (Exception e) { @@ -209,9 +232,65 @@ int PMGDQueryHandler::add_node(const protobufs::AddNode &cn, return 0; } +int PMGDQueryHandler::update_node(const protobufs::UpdateNode &un, + protobufs::CommandResponse *response) +{ + long id = un.identifier(); + bool query = un.has_query_node(); + + auto it = _cached_nodes.end(); + + // If both _ref and query are defined, _ref will have priority. + if (id >= 0) + it = _cached_nodes.find(id); + + if (it == _cached_nodes.end()) { + if (!query) { + set_response(response, PMGDCmdResponse::Error, "Undefined _ref value used in update\n"); + return -1; + } + else { + query_node(un.query_node(), response); + if (response->error_code() != PMGDCmdResponse::Success) + return -1; + long qn_id = un.query_node().identifier(); + if (qn_id >= 0) + it = _cached_nodes.find(qn_id); + else { + set_response(response, PMGDCmdResponse::Error, "Undefined _ref value used in update\n"); + return -1; + } + } + } + + auto nit = it->second; + long updated = 0; + for ( ; *nit; nit->next()) { + Node &n = **nit; + updated++; + for (int i = 0; i < un.properties_size(); ++i) { + const protobufs::Property &p = un.properties(i); + set_property(n, p); + } + for (int i = 0; i < un.remove_props_size(); ++i) + n.remove_property(un.remove_props(i).c_str()); + } + nit->reset(); + set_response(response, protobufs::Count, PMGDCmdResponse::Success); + response->set_op_int_value(updated); + 
return 0; +} + int PMGDQueryHandler::add_edge(const protobufs::AddEdge &ce, PMGDCmdResponse *response) { + response->set_node_edge(false); + long id = ce.identifier(); + if (id >= 0 && _cached_edges.find(id) != _cached_edges.end()) { + set_response(response, PMGDCmdResponse::Error, "Reuse of _ref value\n"); + return -1; + } + // Presumably this node gets placed here. StringID sid(ce.edge().tag().c_str()); @@ -238,6 +317,10 @@ int PMGDQueryHandler::add_edge(const protobufs::AddEdge &ce, return -1; } + ReusableEdgeIterator *rei = NULL; + if (id >= 0) + rei = new ReusableEdgeIterator(); + long eid = 0; // TODO: Partition code goes here for ( ; *srcni; srcni->next()) { @@ -245,6 +328,9 @@ int PMGDQueryHandler::add_edge(const protobufs::AddEdge &ce, for ( ; *dstni; dstni->next()) { Node &dst = **dstni; Edge &e = _db->add_edge(src, dst, sid); + if (id >= 0) + rei->add(&e); + for (int i = 0; i < ce.edge().properties_size(); ++i) { const PMGDProp &p = ce.edge().properties(i); set_property(e, p); @@ -256,12 +342,72 @@ int PMGDQueryHandler::add_edge(const protobufs::AddEdge &ce, } srcni->reset(); + if (id >= 0) { + rei->reset(); // Since we add at tail. 
+ _cached_edges[id] = rei; + } + set_response(response, protobufs::EdgeID, PMGDCmdResponse::Success); + // ID of the last edge added response->set_op_int_value(eid); return 0; } +int PMGDQueryHandler::update_edge(const protobufs::UpdateEdge &ue, + PMGDCmdResponse *response) +{ + long id = ue.identifier(); + bool query = ue.has_query_edge(); + + auto it = _cached_edges.end(); + + if (id >= 0) + it = _cached_edges.find(id); + + if (it == _cached_edges.end()) { + if (!query) { + set_response(response, PMGDCmdResponse::Error, "Undefined _ref value used in update\n"); + return -1; + } + else { + query_edge(ue.query_edge(), response); + if (response->error_code() != PMGDCmdResponse::Success) + return -1; + long qe_id = ue.query_edge().identifier(); + if (qe_id >= 0) + it = _cached_edges.find(qe_id); + else { + set_response(response, PMGDCmdResponse::Error, "Undefined _ref value used in update\n"); + return -1; + } + } + } + + auto eit = it->second; + long updated = 0; + for ( ; *eit; eit->next()) { + Edge &e = **eit; + updated++; + for (int i = 0; i < ue.properties_size(); ++i) { + const protobufs::Property &p = ue.properties(i); + set_property(e, p); + } + for (int i = 0; i < ue.remove_props_size(); ++i) + // TODO: If many nodes/edges are being updated, + // it would be advantageous + // to get the StringIDs for the properties in advance instead of + // converting each property name to a StringID + // every time it is used. 
+ e.remove_property(ue.remove_props(i).c_str()); + } + eit->reset(); + set_response(response, protobufs::Count, PMGDCmdResponse::Success); + response->set_op_int_value(updated); + return 0; + +} + template void PMGDQueryHandler::set_property(Element &e, const PMGDProp &p) { @@ -299,12 +445,8 @@ int PMGDQueryHandler::query_node(const protobufs::QueryNode &qn, ReusableNodeIterator *start_ni = NULL; PMGD::Direction dir; StringID edge_tag; - - if (qn.p_op() == protobufs::Or) { - set_response(response, PMGDCmdResponse::Error, - "Or operation not implemented\n"); - return -1; - } + const PMGDQueryConstraints &qc = qn.constraints(); + const PMGDQueryResultInfo &qr = qn.results(); long id = qn.identifier(); if (id >= 0 && _cached_nodes.find(id) != _cached_nodes.end()) { @@ -316,11 +458,14 @@ int PMGDQueryHandler::query_node(const protobufs::QueryNode &qn, bool has_link = qn.has_link(); if (has_link) { // case where link is used. const protobufs::LinkInfo &link = qn.link(); - if (link.nb_unique()) { // TODO Add support for unique neighbors across iterators + + if (link.nb_unique()) { + // TODO Add support for unique neighbors across iterators set_response(response, PMGDCmdResponse::Error, "Non-repeated neighbors not supported\n"); return -1; } + long start_id = link.start_identifier(); auto start = _cached_nodes.find(start_id); if (start == _cached_nodes.end()) { @@ -336,37 +481,40 @@ int PMGDQueryHandler::query_node(const protobufs::QueryNode &qn, : StringID(link.e_tag().c_str()); } - StringID search_node_tag = (qn.tag_oneof_case() == PMGDQueryNode::kTagid) - ? StringID(qn.tagid()) - : StringID(qn.tag().c_str()); + StringID search_node_tag = (qc.tag_oneof_case() == PMGDQueryConstraints::kTagid) + ? 
StringID(qc.tagid()) + : StringID(qc.tag().c_str()); - SearchExpression search(*_db, search_node_tag); + SearchExpression search(*_db, search_node_tag, + qn.constraints().p_op() == protobufs::Or); - for (int i = 0; i < qn.predicates_size(); ++i) { - const PMGDPropPred &p_pp = qn.predicates(i); + for (int i = 0; i < qc.predicates_size(); ++i) { + const PMGDPropPred &p_pp = qc.predicates(i); PropertyPredicate j_pp = construct_search_term(p_pp); search.add(j_pp); } - NodeIterator ni = has_link ? + PMGD::NodeIterator ni = has_link ? PMGD::NodeIterator(new MultiNeighborIteratorImpl(start_ni, search, dir, edge_tag)) : search.eval_nodes(); if (!bool(ni)) { set_response(response, PMGDCmdResponse::Empty, "Null search iterator\n"); + if (has_link) + start_ni->reset(); return -1; } // Set these in case there is no results block. - set_response(response, qn.r_type(), PMGDCmdResponse::Success); + set_response(response, qr.r_type(), PMGDCmdResponse::Success); // TODO: Also, this triggers a copy of the SearchExpression object // via the SearchExpressionIterator class, which might be slow, // especially with a lot of property constraints. Might need another // way for it. - if (!(id >= 0 || qn.unique() || qn.sort())) { + if (!(id >= 0 || qc.unique() || qr.sort())) { // If not reusable - build_results(ni, qn, response); + build_results(ni, qr, response); // Make sure the starting iterator is reset for later use. if (has_link) @@ -376,22 +524,24 @@ int PMGDQueryHandler::query_node(const protobufs::QueryNode &qn, ReusableNodeIterator *tni = new ReusableNodeIterator(ni); - if (qn.unique()) { + if (qc.unique()) { tni->next(); if (bool(*tni)) { // Not unique and that is an error here. 
set_response(response, PMGDCmdResponse::NotUnique, "Query response not unique\n"); + if (has_link) + start_ni->reset(); delete tni; return -1; } tni->reset(); } - if (qn.sort()) - tni->sort(qn.sort_key().c_str()); + if (qr.sort()) + tni->sort(qr.sort_key().c_str(), qr.descending()); - if (qn.r_type() != protobufs::Cached) - build_results(*tni, qn, response); + if (qr.r_type() != protobufs::Cached) + build_results(*tni, qr, response); if (id >= 0) { // We have to traverse the current iterator fully, so we can @@ -411,6 +561,116 @@ int PMGDQueryHandler::query_node(const protobufs::QueryNode &qn, return 0; } +int PMGDQueryHandler::query_edge(const protobufs::QueryEdge &qe, + PMGDCmdResponse *response) +{ + ReusableNodeIterator *start_ni = NULL; + PMGD::Direction dir; + StringID edge_tag; + const PMGDQueryConstraints &qc = qe.constraints(); + const PMGDQueryResultInfo &qr = qe.results(); + response->set_node_edge(false); + + if (qc.p_op() == protobufs::Or) { + set_response(response, PMGDCmdResponse::Error, + "Or operation not implemented\n"); + return -1; + } + + long id = qe.identifier(); + if (id >= 0 && _cached_edges.find(id) != _cached_edges.end()) { + set_response(response, PMGDCmdResponse::Error, + "Reuse of _ref value\n"); + return -1; + } + + // See if we need to match edges based on some starting or + // ending nodes. + long src_id = qe.src_node_id(); + ReusableNodeIterator *src_ni = NULL; + if (src_id >= 0) { + auto it = _cached_nodes.find(src_id); + if (it != _cached_nodes.end()) + src_ni = it->second; + } + long dest_id = qe.dest_node_id(); + ReusableNodeIterator *dest_ni = NULL; + if (dest_id >= 0) { + auto it = _cached_nodes.find(dest_id); + if (it != _cached_nodes.end()) + dest_ni = it->second; + } + + StringID search_edge_tag = (qc.tag_oneof_case() == PMGDQueryConstraints::kTagid) + ? 
StringID(qc.tagid()) + : StringID(qc.tag().c_str()); + + SearchExpression search(*_db, search_edge_tag, false); + + for (int i = 0; i < qc.predicates_size(); ++i) { + const PMGDPropPred &p_pp = qc.predicates(i); + PropertyPredicate j_pp = construct_search_term(p_pp); + search.add(j_pp); + } + + EdgeIterator ei = PMGD::EdgeIterator(new NodeEdgeIteratorImpl(search, src_ni, dest_ni)); + if (!bool(ei)) { + set_response(response, PMGDCmdResponse::Empty, + "Null search iterator\n"); + // Make sure the src and dest Node iterators are reset. + if (src_ni != NULL) src_ni->reset(); + if (dest_ni != NULL) dest_ni->reset(); + return -1; + } + + // Set these in case there is no results block. + set_response(response, qr.r_type(), PMGDCmdResponse::Success); + + if (!(id >= 0 || qc.unique() || qr.sort())) { + // If not reusable + build_results(ei, qr, response); + + // Make sure the src and dest Node iterators are reset. + if (src_ni != NULL) src_ni->reset(); + if (dest_ni != NULL) dest_ni->reset(); + + return 0; + } + + ReusableEdgeIterator *tei = new ReusableEdgeIterator(ei); + + if (qc.unique()) { + tei->next(); + if (bool(*tei)) { // Not unique and that is an error here. + set_response(response, PMGDCmdResponse::NotUnique, + "Query response not unique\n"); + delete tei; + if (src_ni != NULL) src_ni->reset(); + if (dest_ni != NULL) dest_ni->reset(); + return -1; + } + tei->reset(); + } + + if (qr.sort()) + tei->sort(qr.sort_key().c_str(), qr.descending()); + + if (qr.r_type() != protobufs::Cached) + build_results(*tei, qr, response); + + if (id >= 0) { + tei->traverse_all(); + tei->reset(); + _cached_edges[id] = tei; + } + else + delete tei; + + if (src_ni != NULL) src_ni->reset(); + if (dest_ni != NULL) dest_ni->reset(); + return 0; +} + PropertyPredicate PMGDQueryHandler::construct_search_term(const PMGDPropPred &p_pp) { StringID key = (p_pp.key_oneof_case() == 2) ? 
StringID(p_pp.keyid()) : StringID(p_pp.key().c_str()); @@ -456,18 +716,23 @@ Property PMGDQueryHandler::construct_search_property(const PMGDProp &p) namespace VDMS { template void PMGDQueryHandler::build_results(PMGD::NodeIterator &ni, - const protobufs::QueryNode &qn, + const protobufs::ResultInfo &qn, PMGDCmdResponse *response); template void PMGDQueryHandler::build_results( PMGDQueryHandler::ReusableNodeIterator &ni, - const protobufs::QueryNode &qn, + const protobufs::ResultInfo &qn, + PMGDCmdResponse *response); + template + void PMGDQueryHandler::build_results( + PMGD::EdgeIterator &ni, + const protobufs::ResultInfo &qn, PMGDCmdResponse *response); }; template void PMGDQueryHandler::build_results(Iterator &ni, - const protobufs::QueryNode &qn, + const protobufs::ResultInfo &qn, PMGDCmdResponse *response) { bool avg = false; @@ -484,10 +749,12 @@ void PMGDQueryHandler::build_results(Iterator &ni, for (; ni; ni.next()) { for (int i = 0; i < keyids.size(); ++i) { Property j_p; - if (!ni->check_property(keyids[i], j_p)) - continue; PMGDPropList &list = rmap[qn.response_keys(i)]; PMGDProp *p_p = list.add_values(); + if (!ni->check_property(keyids[i], j_p)) { + construct_missing_property(p_p); + continue; + } construct_protobuf_property(j_p, p_p); } count++; @@ -584,3 +851,10 @@ void PMGDQueryHandler::construct_protobuf_property(const Property &j_p, PMGDProp p_p->set_blob_value(j_p.blob_value().value, j_p.blob_value().size); } } + +void PMGDQueryHandler::construct_missing_property(PMGDProp *p_p) +{ + // Assumes matching enum values! 
+ p_p->set_type(PMGDProp::StringType); + p_p->set_string_value("Missing property"); +} diff --git a/src/PMGDQueryHandler.h b/src/PMGDQueryHandler.h index 22926922..3a17f6e8 100644 --- a/src/PMGDQueryHandler.h +++ b/src/PMGDQueryHandler.h @@ -33,7 +33,6 @@ #include #include -#include #include #include @@ -48,7 +47,10 @@ namespace VDMS { typedef PMGD::protobufs::PropertyPredicate PMGDPropPred; typedef PMGD::protobufs::PropertyList PMGDPropList; typedef PMGD::protobufs::Property PMGDProp; + typedef PMGD::protobufs::Constraints PMGDQueryConstraints; + typedef PMGD::protobufs::ResultInfo PMGDQueryResultInfo; typedef PMGD::protobufs::QueryNode PMGDQueryNode; + typedef PMGD::protobufs::QueryEdge PMGDQueryEdge; typedef PMGD::protobufs::CommandResponse PMGDCmdResponse; typedef PMGD::protobufs::ResponseType PMGDRespType; typedef PMGDCmdResponse::ErrorCode PMGDCmdErrorCode; @@ -58,17 +60,19 @@ namespace VDMS { class PMGDQueryHandler { - class ReusableNodeIterator; + template + class ReusableIterator; + + typedef ReusableIterator ReusableNodeIterator; + typedef ReusableIterator ReusableEdgeIterator; + class MultiNeighborIteratorImpl; // Until we have a separate PMGD server this db lives here static PMGD::Graph *_db; - // Need this lock till we have concurrency support in PMGD - // TODO: Make this reader writer. - static std::mutex *_dblock; - PMGD::Transaction *_tx; + bool _readonly; // Variable changes per TX based on process_queries parameter. // Map an integer ID to a NodeIterator (reset at the end of each transaction). // This works for Adds and Queries. We assume that the client or @@ -79,19 +83,24 @@ namespace VDMS { // of finding out if the reference is for an AddNode or a QueryNode // and rather than searching multiple maps, we keep it uniform here. 
std::unordered_map _cached_nodes; + std::unordered_map _cached_edges; int process_query(const PMGDCmd *cmd, PMGDCmdResponse *response); void error_cleanup(std::vector &responses, PMGDCmdResponse *last_resp); int add_node(const PMGD::protobufs::AddNode &cn, PMGDCmdResponse *response); + int update_node(const PMGD::protobufs::UpdateNode &un, PMGDCmdResponse *response); int add_edge(const PMGD::protobufs::AddEdge &ce, PMGDCmdResponse *response); + int update_edge(const PMGD::protobufs::UpdateEdge &ue, PMGDCmdResponse *response); template void set_property(Element &e, const PMGDProp&p); int query_node(const PMGDQueryNode &qn, PMGDCmdResponse *response); + int query_edge(const PMGDQueryEdge &qe, PMGDCmdResponse *response); PMGD::PropertyPredicate construct_search_term(const PMGDPropPred &p_pp); PMGD::Property construct_search_property(const PMGDProp&p); template void build_results(Iterator &ni, - const PMGDQueryNode &qn, + const PMGDQueryResultInfo &qn, PMGDCmdResponse *response); void construct_protobuf_property(const PMGD::Property &j_p, PMGDProp*p_p); + void construct_missing_property(PMGDProp *p_p); void set_response(PMGDCmdResponse *response, PMGDCmdErrorCode error_code, std::string error_msg) @@ -116,8 +125,10 @@ namespace VDMS { } public: + class NodeEdgeIteratorImpl; static void init(); - PMGDQueryHandler() { _tx = NULL; } + static void destroy(); + PMGDQueryHandler() { _tx = NULL; _readonly = true; } // The vector here can contain just one JL command but will be surrounded by // TX begin and end. So just expose one call to the QueryHandler for @@ -128,6 +139,8 @@ namespace VDMS { // than the number of commands. // Ensure that the cmd_grp_id, that is the query number are in increasing // order and account for the TxBegin and TxEnd in numbering. 
- std::vector process_queries(const PMGDCmds &cmds, int num_groups); + std::vector process_queries(const PMGDCmds &cmds, + int num_groups, bool readonly); }; -}; + +}; // end VDMS namespace diff --git a/src/QueryHandler.cc b/src/QueryHandler.cc index a7dea438..4f2dd8e0 100644 --- a/src/QueryHandler.cc +++ b/src/QueryHandler.cc @@ -35,6 +35,10 @@ #include "QueryHandler.h" #include "ImageCommand.h" +#include "DescriptorsCommand.h" +#include "BoundingBoxCommand.h" +#include "VideoCommand.h" + #include "ExceptionsCommand.h" #include "PMGDQuery.h" @@ -55,11 +59,32 @@ valijson::Schema* QueryHandler::_schema = new valijson::Schema; void QueryHandler::init() { - _rs_cmds["AddEntity"] = new AddEntity(); - _rs_cmds["Connect"] = new Connect(); - _rs_cmds["FindEntity"] = new FindEntity(); - _rs_cmds["AddImage"] = new AddImage(); - _rs_cmds["FindImage"] = new FindImage(); + DescriptorsManager::init(); + + _rs_cmds["AddEntity"] = new AddEntity(); + _rs_cmds["UpdateEntity"] = new UpdateEntity(); + _rs_cmds["FindEntity"] = new FindEntity(); + + _rs_cmds["AddConnection"] = new AddConnection(); + _rs_cmds["UpdateConnection"] = new UpdateConnection(); + _rs_cmds["FindConnection"] = new FindConnection(); + + _rs_cmds["AddImage"] = new AddImage(); + _rs_cmds["UpdateImage"] = new UpdateImage(); + _rs_cmds["FindImage"] = new FindImage(); + + _rs_cmds["AddDescriptorSet"] = new AddDescriptorSet(); + _rs_cmds["AddDescriptor"] = new AddDescriptor(); + _rs_cmds["FindDescriptor"] = new FindDescriptor(); + _rs_cmds["ClassifyDescriptor"] = new ClassifyDescriptor(); + + _rs_cmds["AddBoundingBox"] = new AddBoundingBox(); + _rs_cmds["UpdateBoundingBox"] = new UpdateBoundingBox(); + _rs_cmds["FindBoundingBox"] = new FindBoundingBox(); + + _rs_cmds["AddVideo"] = new AddVideo(); + _rs_cmds["UpdateVideo"] = new UpdateVideo(); + _rs_cmds["FindVideo"] = new FindVideo(); // Load the string containing the schema (api_schema/APISchema.h) Json::Reader reader; @@ -129,8 +154,7 @@ bool 
QueryHandler::syntax_checker(const Json::Value& root, Json::Value& error) valijson::adapters::JsonCppAdapter user_query(root); if (!_validator.validate(*_schema, user_query, &results)) { std::cerr << "API validation failed for:" << std::endl; - Json::StyledWriter swriter; - std::cerr << swriter.write(root) << std::endl; + std::cerr << root.toStyledString() << std::endl; // Will attempt to find the simple error // To avoid valijson dump @@ -172,10 +196,11 @@ bool QueryHandler::syntax_checker(const Json::Value& root, Json::Value& error) return true; } -int QueryHandler::parse_commands(const std::string& commands, +int QueryHandler::parse_commands(const protobufs::queryMessage& proto_query, Json::Value& root) { Json::Reader reader; + const std::string commands = proto_query.json(); try { bool parseSuccess = reader.parse(commands.c_str(), root); @@ -193,6 +218,28 @@ int QueryHandler::parse_commands(const std::string& commands, return -1; } + unsigned blob_counter = 0; + for (int j = 0; j < root.size(); j++) { + const Json::Value& query = root[j]; + assert(query.getMemberNames().size() == 1); + std::string cmd = query.getMemberNames()[0]; + + if (_rs_cmds[cmd]->need_blob(query)) { + blob_counter++; + } + } + + if (blob_counter != proto_query.blobs().size()) { + root = error; + root["info"] = std::string("Expected blobs: " + + std::to_string(blob_counter) + + ". Received blobs: " + + std::to_string(proto_query.blobs().size())); + root["status"] = RSCommand::Error; + std::cerr << "Not enough blobs!" << std::endl; + return -1; + } + } catch (Json::Exception const&) { root["info"] = "Json Exception at Parsing"; root["status"] = RSCommand::Error; @@ -202,12 +249,21 @@ int QueryHandler::parse_commands(const std::string& commands, return 0; } -void QueryHandler::cleanup_query(const std::vector& images) +// TODO create a better mechanism to cleanup queries that +// includes feature vectors and user-defined blobs +// For now, we do it for videos/images as a starting point. 
+void QueryHandler::cleanup_query(const std::vector& images, + const std::vector& videos) { for (auto& img_path : images) { VCL::Image img(img_path); img.delete_image(); } + + for (auto& vid_path : videos) { + VCL::Video img(vid_path); + img.delete_video(); + } } void QueryHandler::process_query(protobufs::queryMessage& proto_query, @@ -215,17 +271,32 @@ { Json::FastWriter fastWriter; + Json::Value root; + Json::Value exception_error; + std::stringstream error_msg; + auto exception_handler = [&]() { + // When exception is caught, we return the message. + std::cerr << "Failed Query: " << std::endl; + std::cerr << root << std::endl; + std::cerr << error_msg.str(); + std::cerr << "End Failed Query: " << std::endl; + exception_error["info"] = error_msg.str(); + exception_error["status"] = RSCommand::Error; + proto_res.set_json(fastWriter.write(exception_error)); + }; + try { Json::Value json_responses; - Json::Value root; Json::Value cmd_result; Json::Value cmd_current; std::vector images_log; + std::vector videos_log; + std::vector construct_results; auto error = [&](Json::Value& res, Json::Value& failed_command) { - cleanup_query(images_log); + cleanup_query(images_log, videos_log); res["FailedCommand"] = failed_command; json_responses.clear(); json_responses.append(res); @@ -235,7 +306,7 @@ void QueryHandler::process_query(protobufs::queryMessage& proto_query, std::cerr << w.write(json_responses); }; - if (parse_commands(proto_query.json(), root) != 0) { + if (parse_commands(proto_query, root) != 0) { cmd_current = "Transaction"; error(root, cmd_current); return; @@ -247,17 +318,13 @@ //iterate over the list of the queries for (int j = 0; j < root.size(); j++) { const Json::Value& query = root[j]; - assert(query.getMemberNames().size() == 1); std::string cmd = query.getMemberNames()[0]; int group_count = 
pmgd_query.add_group(); RSCommand* rscmd = _rs_cmds[cmd]; - // This has to go on the controls - assert(proto_query.blobs().size() > blob_count-1); - - const std::string& blob = rscmd->need_blob() ? + const std::string& blob = rscmd->need_blob(query) ? proto_query.blobs(blob_count++) : ""; int ret_code = rscmd->construct_protobuf(pmgd_query, query, blob, @@ -266,11 +333,16 @@ void QueryHandler::process_query(protobufs::queryMessage& proto_query, if (cmd_result.isMember("image_added")) { images_log.push_back(cmd_result["image_added"].asString()); } + if (cmd_result.isMember("video_added")) { + videos_log.push_back(cmd_result["video_added"].asString()); + } if (ret_code != 0) { error(cmd_result, root[j]); return; } + + construct_results.push_back(cmd_result); } Json::Value& tx_responses = pmgd_query.run(); @@ -280,16 +352,26 @@ void QueryHandler::process_query(protobufs::queryMessage& proto_query, cmd_result = tx_responses; cmd_result["info"] = "Failed PMGDTransaction"; cmd_result["status"] = RSCommand::Error; + Json::StyledWriter w; + std::cerr << w.write(tx_responses); error(cmd_result, cmd_current); return; } else { + blob_count = 0; for (int j = 0; j < root.size(); j++) { - std::string cmd = root[j].getMemberNames()[0]; + Json::Value& query = root[j]; + std::string cmd = query.getMemberNames()[0]; + + RSCommand* rscmd = _rs_cmds[cmd]; + + const std::string& blob = rscmd->need_blob(query) ? 
+ proto_query.blobs(blob_count++) : ""; - cmd_result = _rs_cmds[cmd]->construct_responses( - tx_responses[j], - root[j], proto_res); + query["cp_result"] = construct_results[j]; + cmd_result = rscmd->construct_responses( + tx_responses[j], + query, proto_res, blob); // This is for error handling if (cmd_result.isMember("status")) { @@ -308,28 +390,39 @@ proto_res.set_json(fastWriter.write(json_responses)); - } catch (VCL::Exception e) { + } catch (VCL::Exception& e) { print_exception(e); - std::cerr << "FATAL ERROR: VCL Exception at QH" << std::endl; - exit(0); - } catch (PMGD::Exception e) { + error_msg << "Internal Server Error: VCL Exception at QH" << std::endl; + exception_handler(); + } catch (PMGD::Exception& e) { print_exception(e); - std::cerr << "FATAL ERROR: PMGD Exception at QH" << std::endl; - exit(0); - } catch (ExceptionCommand e) { + error_msg << "Internal Server Error: PMGD Exception at QH" + << std::endl; + exception_handler(); + } catch (ExceptionCommand& e) { print_exception(e); - std::cerr << "FATAL ERROR: Command Exception at QH" << std::endl; - exit(0); - } catch (Json::Exception const&) { - // Should not happen + error_msg << "Internal Server Error: Command Exception at QH" + << std::endl; + exception_handler(); + } catch (Json::Exception const& e) { // In case of error on the last fastWriter - std::cerr << "FATAL: Json Exception!" << std::endl; - Json::Value error; - error["info"] = "Internal Server Error: Json Exception"; - error["status"] = RSCommand::Error; - proto_res.set_json(fastWriter.write(error)); - } catch (const std::invalid_argument& ex) { - std::cerr << "Invalid argument: " << ex.what() << '\n'; - exit(0); + error_msg << "Internal Server Error: Json Exception: " + << e.what() << std::endl; + exception_handler(); + } catch (google::protobuf::FatalException& e) { + // Need to be careful with this, may lead to memory leak. + // Protobuf is not exception safe. 
+ error_msg << "Internal Server Error: Protobuf Exception: " + << e.what() << std::endl; + exception_handler(); + } catch (const std::invalid_argument& e) { + error_msg << "FATAL: Invalid argument: " << e.what() << std::endl; + exception_handler(); + } catch (const std::exception& e) { + error_msg << "std Exception: " << e.what() << std::endl; + exception_handler(); + } catch (...) { + error_msg << "Unknown Exception" << std::endl; + exception_handler(); } } diff --git a/src/QueryHandler.h b/src/QueryHandler.h index 170ad22e..6147407f 100644 --- a/src/QueryHandler.h +++ b/src/QueryHandler.h @@ -59,8 +59,11 @@ typedef ::google::protobuf::RepeatedPtrField BlobArray; PMGDQueryHandler _pmgd_qh; bool syntax_checker(const Json::Value &root, Json::Value& error); - int parse_commands(const std::string& commands, Json::Value& root); - void cleanup_query(const std::vector& images); + int parse_commands(const protobufs::queryMessage& proto_query, + Json::Value& root); + void cleanup_query(const std::vector& images, + const std::vector& videos); + void process_query(protobufs::queryMessage& proto_query, protobufs::queryMessage& response); diff --git a/src/RSCommand.cc b/src/RSCommand.cc index d54d8ddc..c87d48f0 100644 --- a/src/RSCommand.cc +++ b/src/RSCommand.cc @@ -31,14 +31,17 @@ #include #include +#include +#include #include "QueryHandler.h" #include "ExceptionsCommand.h" +#include "VDMSConfig.h" +#include "VCL.h" +#include "defines.h" using namespace VDMS; -#define VDMS_GENERIC_LINK "AT:edge" - RSCommand::RSCommand(const std::string& cmd_name): _cmd_name(cmd_name) { @@ -47,7 +50,8 @@ RSCommand::RSCommand(const std::string& cmd_name): Json::Value RSCommand::construct_responses( Json::Value& response, const Json::Value& json, - protobufs::queryMessage &query_res) + protobufs::queryMessage &query_res, + const std::string& blob) { Json::Value ret; ret[_cmd_name] = check_responses(response); @@ -76,6 +80,8 @@ Json::Value RSCommand::check_responses(Json::Value& responses) } } + 
ret = responses[0]; + if (!flag_error) { ret["status"] = RSCommand::Success; } @@ -94,6 +100,16 @@ int RSCommand::get_value(const Json::Value& json, const std::string& key, return def; } +template<> +double RSCommand::get_value(const Json::Value& json, const std::string& key, + double def) +{ + if (json.isMember(key)) + return json[key].asDouble(); + + return def; +} + template<> bool RSCommand::get_value(const Json::Value& json, const std::string& key, bool def) @@ -145,6 +161,13 @@ void RSCommand::add_link(PMGDQuery& query, const Json::Value& link, AddEntity::AddEntity() : RSCommand("AddEntity") { + _storage_blob = VDMSConfig::instance()->get_path_blobs(); +} + +bool AddEntity::need_blob(const Json::Value& jsoncmd) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + return get_value(cmd, "blob", false); } int AddEntity::construct_protobuf(PMGDQuery& query, @@ -154,31 +177,77 @@ int AddEntity::construct_protobuf(PMGDQuery& query, Json::Value& error) { const Json::Value& cmd = jsoncmd[_cmd_name]; + bool link = cmd.isMember("link"); int node_ref = get_value(cmd, "_ref", - query.get_available_reference()); + link ? query.get_available_reference() : -1); + + // Modifiyng the existing properties that the user gives + // is a good option to make the AddNode more simple. + // This is not ideal since we are manupulating with user's + // input, but for now it is an acceptable solution. 
+ Json::Value props = get_value(cmd, "properties"); + + if (get_value(cmd, "blob", false)) { + std::ostringstream oss; + oss << std::hex << VCL::get_uint64(); + std::string file_name = _storage_blob + "/" + oss.str(); + + props[VDMS_EN_BLOB_PATH_PROP] = file_name; + + std::ofstream file; + file.open(file_name); + file << blob; + file.close(); + } query.AddNode( node_ref, get_value(cmd, "class"), - cmd["properties"], + props, cmd["constraints"] ); - if (cmd.isMember("link")) { + if (link) { add_link(query, cmd["link"], node_ref, VDMS_GENERIC_LINK); } return 0; } -//========= Connect definitions ========= +//========= UpdateEntity definitions ========= -Connect::Connect() : RSCommand("Connect") +UpdateEntity::UpdateEntity() : RSCommand("UpdateEntity") { } -int Connect::construct_protobuf( +int UpdateEntity::construct_protobuf(PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + query.UpdateNode( + get_value(cmd, "_ref", -1), + get_value(cmd, "class"), + cmd["properties"], + cmd["remove_props"], + cmd["constraints"], + get_value(cmd, "unique", false) + ); + + return 0; +} + +//========= AddConnection definitions ========= + +AddConnection::AddConnection() : RSCommand("AddConnection") +{ +} + +int AddConnection::construct_protobuf( PMGDQuery& query, const Json::Value& jsoncmd, const std::string& blob, @@ -198,6 +267,34 @@ int Connect::construct_protobuf( return 0; } +//========= UpdateConnection definitions ========= + +UpdateConnection::UpdateConnection() : RSCommand("UpdateConnection") +{ +} + +int UpdateConnection::construct_protobuf(PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + query.UpdateEdge( + get_value(cmd, "_ref", -1), + get_value(cmd, "ref1", -1), + get_value(cmd, "ref2", -1), + get_value(cmd, "class"), + cmd["properties"], + 
cmd["remove_props"], + cmd["constraints"], + get_value(cmd, "unique", false) + ); + + return 0; +} + //========= FindEntity definitions ========= FindEntity::FindEntity() : RSCommand("FindEntity") @@ -213,12 +310,18 @@ int FindEntity::construct_protobuf( { const Json::Value& cmd = jsoncmd[_cmd_name]; + Json::Value results = get_value(cmd, "results"); + + if (get_value(results, "blob", false)){ + results["list"].append(VDMS_EN_BLOB_PATH_PROP); + } + query.QueryNode( get_value(cmd, "_ref", -1), get_value(cmd, "class"), cmd["link"], cmd["constraints"], - cmd["results"], + results, get_value(cmd, "unique", false) ); @@ -228,15 +331,69 @@ int FindEntity::construct_protobuf( Json::Value FindEntity::construct_responses( Json::Value& response, const Json::Value& json, - protobufs::queryMessage &query_res) + protobufs::queryMessage &query_res, + const std::string &blob) { assert(response.size() == 1); Json::Value ret; + Json::Value& findEnt = response[0]; + + const Json::Value& cmd = json[_cmd_name]; + + if (get_value(cmd["results"], "blob", false)) { + for (auto& ent : findEnt["entities"]) { + + if(ent.isMember(VDMS_EN_BLOB_PATH_PROP)) { + std::string blob_path = ent[VDMS_EN_BLOB_PATH_PROP].asString(); + ent.removeMember(VDMS_EN_BLOB_PATH_PROP); + + std::string* blob_str = query_res.add_blobs(); + std::ifstream t(blob_path); + t.seekg(0, std::ios::end); + size_t size = t.tellg(); + blob_str->resize(size); + t.seekg(0); + t.read((char*)blob_str->data(), size); + + // For those cases the entity does not have a blob. + // We need to indicate which entities have blobs. 
+ ent["blob"] = true; + } + } + } // This will change the response tree, // but it is ok and avoids a copy - ret[_cmd_name].swap(response[0]); + ret[_cmd_name].swap(findEnt); return ret; } + +//========= FindConnection definitions ========= + +FindConnection::FindConnection() : RSCommand("FindConnection") +{ +} + +int FindConnection::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + query.QueryEdge( + get_value(cmd, "_ref", -1), + get_value(cmd, "ref1", -1), + get_value(cmd, "ref2", -1), + get_value(cmd, "class"), + cmd["constraints"], + cmd["results"], + get_value(cmd, "unique", false) + ); + + return 0; +} diff --git a/src/RSCommand.h b/src/RSCommand.h index 9d30942a..1a3fb62d 100644 --- a/src/RSCommand.h +++ b/src/RSCommand.h @@ -72,7 +72,7 @@ namespace VDMS { RSCommand(const std::string& cmd_name); - virtual bool need_blob() { return false; } + virtual bool need_blob(const Json::Value& cmd) { return false; } virtual int construct_protobuf( PMGDQuery& query, @@ -84,11 +84,15 @@ namespace VDMS { virtual Json::Value construct_responses( Json::Value& json_responses, const Json::Value& json, - protobufs::queryMessage &response); + protobufs::queryMessage &response, + const std::string &blob); }; class AddEntity : public RSCommand { + private: + std::string _storage_blob; + public: AddEntity(); int construct_protobuf(PMGDQuery& query, @@ -96,17 +100,43 @@ namespace VDMS { const std::string& blob, int grp_id, Json::Value& error); + + bool need_blob(const Json::Value& jsoncmd); + }; + + class AddConnection : public RSCommand + { + public: + AddConnection(); + int construct_protobuf(PMGDQuery& query, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); }; - class Connect : public RSCommand + class UpdateEntity : public RSCommand { public: - Connect(); + UpdateEntity(); int 
construct_protobuf(PMGDQuery& query, const Json::Value& root, const std::string& blob, int grp_id, Json::Value& error); + + }; + + class UpdateConnection : public RSCommand + { + public: + UpdateConnection(); + int construct_protobuf(PMGDQuery& query, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + }; class FindEntity : public RSCommand @@ -122,7 +152,19 @@ namespace VDMS { Json::Value construct_responses( Json::Value& json_responses, const Json::Value& json, - protobufs::queryMessage &response); + protobufs::queryMessage &response, + const std::string &blob); }; + class FindConnection : public RSCommand + { + public: + FindConnection(); + int construct_protobuf(PMGDQuery& query, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + }; }; // namespace VDMS diff --git a/src/SearchExpression.cc b/src/SearchExpression.cc index 3ceb2317..4bf29d1d 100644 --- a/src/SearchExpression.cc +++ b/src/SearchExpression.cc @@ -33,7 +33,9 @@ #include "pmgd.h" #include "neighbor.h" -class SearchExpression::SearchExpressionIterator : public PMGD::NodeIteratorImplIntf +using namespace VDMS; + +class SearchExpression::NodeAndIteratorImpl : public PMGD::NodeIteratorImplIntf { /// Reference to expression to evaluate const SearchExpression _expr; @@ -72,7 +74,7 @@ class SearchExpression::SearchExpressionIterator : public PMGD::NodeIteratorImpl /// /// Postcondition: mNodeIt points to the first matching node, or /// returns NULL. - SearchExpressionIterator(const SearchExpression &expr) + NodeAndIteratorImpl(const SearchExpression &expr) : _expr(expr), mNodeIt(_expr._db.get_nodes(_expr.tag(), (_expr._predicates.empty() ? PMGD::PropertyPredicate() @@ -87,7 +89,7 @@ class SearchExpression::SearchExpressionIterator : public PMGD::NodeIteratorImpl /// /// Postcondition: mNodeIt points to the first matching node, or /// returns NULL. 
- SearchExpressionIterator(const PMGD::Node &node, PMGD::Direction dir, + NodeAndIteratorImpl(const PMGD::Node &node, PMGD::Direction dir, PMGD::StringID edgetag, bool unique, const SearchExpression &neighbor_expr) : _expr(neighbor_expr), @@ -111,8 +113,108 @@ class SearchExpression::SearchExpressionIterator : public PMGD::NodeIteratorImpl PMGD::Node *ref() { return &*mNodeIt; } }; +class SearchExpression::NodeOrIteratorImpl : public PMGD::NodeIteratorImplIntf +{ + /// Reference to expression to evaluate + const SearchExpression _expr; + + /// Node iterator on the first property predicate + PMGD::Node* _node; + + // Indicate where to start in the search expression vector + unsigned _idx; + + // Indicate if it is a neighbor search + bool _neighbor; + + PMGD::NodeIterator _neighborIt; + + /// Advance to the next matching node + /// @returns true if we find a matching node + /// Precondition: _node points to the next possible node + /// candidate + bool _next() + { + while (_idx < _expr._predicates.size()) { + PMGD::NodeIterator ni = + _expr._db.get_nodes(_expr.tag(), + _expr._predicates.at(_idx++)); + + if (ni) { + _node = &*ni; + return true; + } + } + + return false; + } + + bool _next_neighbor() + { + static int id = 0; + while (_neighborIt) { + for (const auto& pred : _expr._predicates) { + PMGD::PropertyFilter pf(pred); + if (pf(*_neighborIt) == PMGD::Pass) { + _node = &*_neighborIt; + return true; + } + } + + _neighborIt.next(); + } + + return false; + } + +public: + /// Construct an iterator given the search expression + /// + /// Postcondition: _node points to the first matching node, or + /// returns NULL. + NodeOrIteratorImpl(const SearchExpression &expr) + : _expr(expr), + _idx(0), + _neighbor(false), + _neighborIt(NULL) + { + _next(); + } + + /// Construct an iterator given the search expression for neighbors + /// + /// Postcondition: _node points to the first matching node, or + /// returns NULL. 
+ NodeOrIteratorImpl(const PMGD::Node &node, PMGD::Direction dir, + PMGD::StringID edgetag, bool unique, + const SearchExpression &neighbor_expr) + : _expr(neighbor_expr), + _neighborIt(get_neighbors(node, dir, edgetag, unique)), + _neighbor(true) + { + _next_neighbor(); + } + + operator bool() const { return bool(_node); } + + /// Advance to the next node + /// @returns true if such a next node exists + bool next() + { + if (_neighbor) { + _neighborIt.next(); + return _next_neighbor(); + } + else { + return _next(); + } + } + + PMGD::Node *ref() { return _node; } +}; + // *** Could find a template way of combining Node and Edge iterator. -class SearchExpression::EdgeSearchExpressionIterator : public PMGD::EdgeIteratorImplIntf +class SearchExpression::EdgeAndIteratorImpl : public PMGD::EdgeIteratorImplIntf { /// Reference to expression to evaluate const SearchExpression &_expr; @@ -143,7 +245,7 @@ class SearchExpression::EdgeSearchExpressionIterator : public PMGD::EdgeIterator /// /// Postcondition: mEdgeIt points to the first matching edge, or /// returns NULL. - EdgeSearchExpressionIterator(const SearchExpression &expr) + EdgeAndIteratorImpl(const SearchExpression &expr) : _expr(expr), mEdgeIt(_expr._db.get_edges(_expr.tag(), (_expr._predicates.empty() ? 
PMGD::PropertyPredicate() @@ -173,20 +275,27 @@ class SearchExpression::EdgeSearchExpressionIterator : public PMGD::EdgeIterator /// @returns an iterator over the search expression PMGD::NodeIterator SearchExpression::eval_nodes() { - return PMGD::NodeIterator(new SearchExpressionIterator(*this)); + if (_or) + return PMGD::NodeIterator(new NodeOrIteratorImpl(*this)); + else + return PMGD::NodeIterator(new NodeAndIteratorImpl(*this)); } /// Evaluate the associated search expression on neighbors /// @returns an iterator over the search expression -PMGD::NodeIterator SearchExpression::eval_nodes(const PMGD::Node &node, PMGD::Direction dir, - PMGD::StringID edgetag, bool unique) +PMGD::NodeIterator SearchExpression::eval_nodes + (const PMGD::Node &node, PMGD::Direction dir, + PMGD::StringID edgetag, bool unique) { - return PMGD::NodeIterator(new SearchExpressionIterator(node, dir, edgetag, unique, *this)); + if (_or) + return PMGD::NodeIterator(new NodeOrIteratorImpl(node, dir, edgetag, unique, *this)); + else + return PMGD::NodeIterator(new NodeAndIteratorImpl(node, dir, edgetag, unique, *this)); } /// Evaluate the associated search expression /// @returns an iterator over the search expression PMGD::EdgeIterator SearchExpression::eval_edges() { - return PMGD::EdgeIterator(new EdgeSearchExpressionIterator(*this)); + return PMGD::EdgeIterator(new EdgeAndIteratorImpl(*this)); } diff --git a/src/SearchExpression.h b/src/SearchExpression.h index 67354a10..5bf1f271 100644 --- a/src/SearchExpression.h +++ b/src/SearchExpression.h @@ -44,31 +44,45 @@ /// predicates. /// /// Calling Eval() returns a node iterator. 
-class SearchExpression { - PMGD::StringID _tag; +namespace VDMS { - /// Opaque definition of a node iterator - class SearchExpressionIterator; + class SearchExpression + { + PMGD::StringID _tag; - /// Opaque definition of an edge iterator - class EdgeSearchExpressionIterator; + /// Opaque definition of a node iterator + class NodeAndIteratorImpl; + class NodeOrIteratorImpl; - /// The conjunctions of property predicates - std::vector _predicates; + /// Opaque definition of an edge iterator + class EdgeAndIteratorImpl; - /// A pointer to the database - PMGD::Graph &_db; + bool _or; -public: - /// Construction requires a handle to a database - SearchExpression(PMGD::Graph &db, PMGD::StringID tag) : _db(db), _tag(tag) {} + /// The conjunctions of property predicates + std::vector _predicates; - void add(PMGD::PropertyPredicate pp) { _predicates.push_back(pp); } - const PMGD::StringID tag() const { return _tag; }; + /// A pointer to the database + PMGD::Graph &_db; - PMGD::NodeIterator eval_nodes(); - PMGD::NodeIterator eval_nodes(const PMGD::Node &node, PMGD::Direction dir = PMGD::Any, - PMGD::StringID edgetag = 0, bool unique = true); + public: + /// Construction requires a handle to a database + SearchExpression(PMGD::Graph &db, PMGD::StringID tag, bool p_or) : + _db(db), _tag(tag), _or(p_or) {} - PMGD::EdgeIterator eval_edges(); -}; + void add(PMGD::PropertyPredicate pp) { _predicates.push_back(pp); } + const PMGD::StringID tag() const { return _tag; }; + PMGD::Graph &db() const { return _db; } + const PMGD::PropertyPredicate &predicate(int i) const { return _predicates.at(i); } + const size_t num_predicates() const { return _predicates.size(); } + + PMGD::NodeIterator eval_nodes(); + PMGD::NodeIterator eval_nodes(const PMGD::Node &node, + PMGD::Direction dir = PMGD::Any, + PMGD::StringID edgetag = 0, + bool unique = true); + + PMGD::EdgeIterator eval_edges(); + }; + +}; // end VDMS namespace diff --git a/src/Server.cc b/src/Server.cc index c8a57f0e..8be6e9ca 
100644 --- a/src/Server.cc +++ b/src/Server.cc @@ -37,6 +37,7 @@ #include "VDMSConfig.h" #include "QueryHandler.h" +#include "DescriptorsManager.h" #include "protobuf/pmgdMessages.pb.h" // Protobuff implementation @@ -104,4 +105,7 @@ Server::~Server() { _cm->shutdown(); delete _cm; + PMGDQueryHandler::destroy(); + DescriptorsManager::instance()->flush(); + VDMSConfig::destroy(); } diff --git a/src/VDMSConfig.cc b/src/VDMSConfig.cc index 440f9156..f3483ee0 100644 --- a/src/VDMSConfig.cc +++ b/src/VDMSConfig.cc @@ -34,10 +34,24 @@ #include #include +#include +#include +#include + #include #include "VDMSConfig.h" +#define DEFAULT_PATH_ROOT "db" +#define DEFAULT_PATH_PMGD "graph" +#define DEFAULT_PATH_IMAGES "images" +#define DEFAULT_PATH_JPG "jpg" +#define DEFAULT_PATH_PNG "png" +#define DEFAULT_PATH_TDB "tdb" +#define DEFAULT_PATH_BLOBS "blobs" +#define DEFAULT_PATH_VIDEOS "videos" +#define DEFAULT_PATH_DESCRIPTORS "descriptors" + using namespace VDMS; VDMSConfig* VDMSConfig::cfg; @@ -76,8 +90,12 @@ VDMSConfig::VDMSConfig(std::string config_file) bool parsingSuccessful = reader.parse(file, json_config); if (!parsingSuccessful){ - std::cout << "Error parsing config file" << std::endl; + std::cout << "Error parsing config file." << std::endl; + std::cout << "Exiting..." << std::endl; + exit(0); } + + build_dirs(); } int VDMSConfig::get_int_value(std::string val, int def) @@ -89,3 +107,83 @@ std::string VDMSConfig::get_string_value(std::string val, std::string def) { return json_config.get(val, def).asString(); } + +// This method will check if the dir exists, +// and create the dir if it does not exist. 
+int VDMSConfig::create_dir(std::string path) +{ + struct stat sb; + while (1) + if (stat(path.c_str(), &sb) == 0) + if (sb.st_mode & S_IFDIR) + return 0; + else + return EEXIST; + else if (errno != ENOENT) + return errno; + else if (mkdir(path.c_str(), 0777) == 0) + return 0; + else if (errno != EEXIST) + return errno; +} + +void VDMSConfig::check_or_create(std::string path) +{ + if (create_dir(path) == 0){ + return; + } + else{ + std::cout << "Cannot open/create directories structure." << std::endl; + std::cout << "Failed dir: " << path << std::endl; + std::cout << "Check paths and permissions." << std::endl; + std::cout << "Exiting..." << std::endl; + exit(0); + } +} + +void VDMSConfig::build_dirs() +{ + // Root + path_root = get_string_value(PARAM_DB_ROOT, DEFAULT_PATH_ROOT); + check_or_create(path_root); + + // PMGD + path_pmgd = path_root + "/" + DEFAULT_PATH_PMGD; + path_pmgd = get_string_value(PARAM_DB_PMGD, path_pmgd); + check_or_create(path_pmgd); + + // IMAGES + path_images = path_root + "/" + DEFAULT_PATH_IMAGES; + path_images = get_string_value(PARAM_DB_IMAGES, path_images); + check_or_create(path_images); + + // IMAGES - PNG + path_png = path_images + "/" + DEFAULT_PATH_PNG; + path_png = get_string_value(PARAM_DB_PNG, path_png); + check_or_create(path_png); + + // IMAGES - JPG + path_jpg = path_images + "/" + DEFAULT_PATH_JPG; + path_jpg = get_string_value(PARAM_DB_JPG, path_jpg); + check_or_create(path_jpg); + + // IMAGES - TDB + path_tdb = path_images + "/" + DEFAULT_PATH_TDB; + path_tdb = get_string_value(PARAM_DB_TDB, path_tdb); + check_or_create(path_tdb); + + // BLOBS + path_blobs = path_root + "/" + DEFAULT_PATH_BLOBS; + path_blobs = get_string_value(PARAM_DB_BLOBS, path_blobs); + check_or_create(path_blobs); + + // VIDEOS + path_videos = path_root + "/" + DEFAULT_PATH_VIDEOS; + path_videos = get_string_value(PARAM_DB_VIDEOS, path_videos); + check_or_create(path_videos); + + // DESCRIPTORS + path_descriptors = path_root + "/" + 
DEFAULT_PATH_DESCRIPTORS; + path_descriptors = get_string_value(PARAM_DB_DESCRIPTORS, path_descriptors); + check_or_create(path_descriptors); +} diff --git a/src/VDMSConfig.h b/src/VDMSConfig.h index 509975a7..53e38542 100644 --- a/src/VDMSConfig.h +++ b/src/VDMSConfig.h @@ -34,6 +34,20 @@ #include #include +// Parameters in the JSON config file +#define PARAM_DB_ROOT "db_root_path" +#define PARAM_DB_PMGD "pmgd_path" +#define PARAM_DB_IMAGES "images_path" +#define PARAM_DB_PNG "png_path" +#define PARAM_DB_JPG "jpg_path" +#define PARAM_DB_TDB "tdb_path" +#define PARAM_DB_BLOBS "blobs_path" +#define PARAM_DB_VIDEOS "videos_path" +#define PARAM_DB_DESCRIPTORS "descriptors_path" + +#define PARAM_PMGD_NUM_ALLOCATORS "pmgd_num_allocators" +#define DEFAULT_PMGD_NUM_ALLOCATORS 1 + namespace VDMS{ class VDMSConfig @@ -48,11 +62,34 @@ namespace VDMS{ static VDMSConfig* cfg; Json::Value json_config; + // Dirs + std::string path_root; + std::string path_pmgd; + std::string path_images; + std::string path_png; + std::string path_jpg; + std::string path_tdb; + std::string path_blobs; + std::string path_videos; + std::string path_descriptors; + VDMSConfig(std::string config_file); + void build_dirs(); + void check_or_create(std::string path); + int create_dir(std::string path); + public: int get_int_value(std::string val, int def); std::string get_string_value(std::string val, std::string def); + const std::string& get_path_root() {return path_root;} + const std::string& get_path_pmgd() {return path_pmgd;} + const std::string& get_path_jpg() {return path_jpg;} + const std::string& get_path_png() {return path_png;} + const std::string& get_path_tdb() {return path_tdb;} + const std::string& get_path_blobs() {return path_blobs;} + const std::string& get_path_videos(){return path_videos;} + const std::string& get_path_descriptors() {return path_descriptors;} }; }; // vdms namespace diff --git a/src/VideoCommand.cc b/src/VideoCommand.cc new file mode 100644 index 00000000..17bdc77e 
--- /dev/null +++ b/src/VideoCommand.cc @@ -0,0 +1,352 @@ +/** + * @file VideoCommand.cc + * + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + */ + +#include +#include + +#include "VideoCommand.h" +#include "VDMSConfig.h" +#include "defines.h" + +using namespace VDMS; + +VideoCommand::VideoCommand(const std::string &cmd_name): + RSCommand(cmd_name) +{ +} + +void VideoCommand::enqueue_operations(VCL::Video& video, const Json::Value& ops) +{ + // Correct operation type and parameters are guaranteed at this point + for (auto& op : ops) { + const std::string& type = get_value(op, "type"); + std::string unit ; + if (type == "threshold") { + video.threshold(get_value(op, "value")); + + } + else if (type == "interval") { + + video.interval( + VCL::Video::FRAMES, + get_value(op, "start"), + get_value(op, "stop"), + get_value(op, "step")); + + } + else if (type == "resize") { + video.resize(get_value(op, "height"), + get_value(op, "width") ); + + } + else if (type == "crop") { + video.crop(VCL::Rectangle ( + get_value(op, "x"), + get_value(op, "y"), + get_value(op, "width"), + get_value(op, "height") )); + } + else { + throw ExceptionCommand(ImageError, "Operation not defined"); + } + } +} + +VCL::Video::Codec VideoCommand::string_to_codec(const std::string& codec) +{ + if (codec == "h263") { + return VCL::Video::Codec::H263; + } + else if (codec == "xvid") { + return VCL::Video::Codec::XVID; + } + else if (codec == "h264") { + return VCL::Video::Codec::H264; + } + + return VCL::Video::Codec::NOCODEC; +} + +//========= AddVideo definitions ========= + +AddVideo::AddVideo() : VideoCommand("AddVideo") +{ + _storage_video = VDMSConfig::instance()->get_path_videos(); +} + +int AddVideo::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + int node_ref = get_value(cmd, "_ref", + query.get_available_reference()); + + VCL::Video video((void*)blob.data(), blob.size()); + + if (cmd.isMember("operations")) { + enqueue_operations(video, cmd["operations"]); + } + + // The container and 
codec are checked by the schema. + // We default to mp4 and h264, if not specified + const std::string& container = + get_value(cmd, "container", "mp4"); + const std::string& file_name = + VCL::create_unique(_storage_video, container); + + // Modifiyng the existing properties that the user gives + // is a good option to make the AddNode more simple. + // This is not ideal since we are manupulating with user's + // input, but for now it is an acceptable solution. + Json::Value props = get_value(cmd, "properties"); + props[VDMS_VID_PATH_PROP] = file_name; + + // Add Video node + query.AddNode(node_ref, VDMS_VID_TAG, props, Json::Value()); + + const std::string& codec = get_value(cmd, "codec", "h264"); + VCL::Video::Codec vcl_codec = string_to_codec(codec); + + video.store(file_name, vcl_codec); + + // In case we need to cleanup the query + error["video_added"] = file_name; + + if (cmd.isMember("link")) { + add_link(query, cmd["link"], node_ref, VDMS_VID_EDGE); + } + + return 0; +} + +//========= UpdateImage definitions ========= + +UpdateVideo::UpdateVideo() : VideoCommand("UpdateVideo") +{ +} + +int UpdateVideo::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + int node_ref = get_value(cmd, "_ref", -1); + + Json::Value constraints = get_value(cmd, "constraints"); + + Json::Value props = get_value(cmd, "properties"); + + Json::Value remove_props = get_value(cmd, "remove_props"); + + // Update Image node + query.UpdateNode(node_ref, VDMS_VID_TAG, props, + remove_props, + constraints, + get_value(cmd, "unique", false)); + + return 0; +} + +Json::Value UpdateVideo::construct_responses( + Json::Value& responses, + const Json::Value& json, + protobufs::queryMessage &query_res, + const std::string &blob) +{ + assert(responses.size() == 1); + + Json::Value ret; + + // TODO In order to support "codec" or "operations", we could + // 
implement VCL save operation here. + + ret[_cmd_name].swap(responses[0]); + return ret; +} + +//========= FindVideo definitions ========= + +FindVideo::FindVideo() : VideoCommand("FindVideo") +{ +} + +int FindVideo::construct_protobuf( + PMGDQuery& query, + const Json::Value& jsoncmd, + const std::string& blob, + int grp_id, + Json::Value& error) +{ + const Json::Value& cmd = jsoncmd[_cmd_name]; + + Json::Value results = get_value(cmd, "results"); + + // Unless otherwhise specified, we return the blob. + if (get_value(results, "blob", true)){ + results["list"].append(VDMS_VID_PATH_PROP); + } + + query.QueryNode( + get_value(cmd, "_ref", -1), + VDMS_VID_TAG, + cmd["link"], + cmd["constraints"], + results, + get_value(cmd, "unique", false) + ); + + return 0; +} + +Json::Value FindVideo::construct_responses( + Json::Value& responses, + const Json::Value& json, + protobufs::queryMessage &query_res, + const std::string &blob) +{ + const Json::Value& cmd = json[_cmd_name]; + + Json::Value ret; + + auto error = [&](Json::Value& res) + { + ret[_cmd_name] = res; + return ret; + }; + + if (responses.size() != 1) { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "PMGD Response Bad Size"; + error(return_error); + } + + Json::Value& FindVideo = responses[0]; + + assert(FindVideo.isMember("entities")); + + if (FindVideo["status"] != 0) { + FindVideo["status"] = RSCommand::Error; + // Uses PMGD info error. + error(FindVideo); + } + + bool flag_empty = true; + + for (auto& ent : FindVideo["entities"]) { + + if(!ent.isMember(VDMS_VID_PATH_PROP)){ + continue; + } + + std::string video_path = ent[VDMS_VID_PATH_PROP].asString(); + ent.removeMember(VDMS_VID_PATH_PROP); + + if (ent.getMemberNames().size() > 0) { + flag_empty = false; + } + try { + if (!cmd.isMember("operations") && + !cmd.isMember("container") && + !cmd.isMember("codec")) + { + // Return video as is. 
+ std::ifstream ifile(video_path, std::ifstream::in); + ifile.seekg(0, std::ios::end); + size_t encoded_size = (long)ifile.tellg(); + ifile.seekg(0, std::ios::beg); + + std::string* video_str = query_res.add_blobs(); + video_str->resize(encoded_size); + ifile.read((char*)(video_str->data()), encoded_size); + ifile.close(); + } + else { + + VCL::Video video(video_path); + + if (cmd.isMember("operations")) { + enqueue_operations(video, cmd["operations"]); + } + + const std::string& container = + get_value(cmd, "container", "mp4"); + const std::string& file_name = + VCL::create_unique("/tmp/", container); + const std::string& codec = + get_value(cmd, "codec", "h264"); + + VCL::Video::Codec vcl_codec = string_to_codec(codec); + video.store(file_name, vcl_codec); // to /tmp/ for encoding. + + auto video_enc = video.get_encoded(); + int size = video_enc.size(); + + if (size > 0) { + + std::string* video_str = query_res.add_blobs(); + video_str->resize(size); + std::memcpy((void*)video_str->data(), + (void*)video_enc.data(), + size); + } + else { + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "Video Data not found"; + error(return_error); + } + } + } catch (VCL::Exception e) { + print_exception(e); + Json::Value return_error; + return_error["status"] = RSCommand::Error; + return_error["info"] = "VCL Exception"; + error(return_error); + } + } + + if (flag_empty) { + FindVideo.removeMember("entities"); + } + + ret[_cmd_name].swap(FindVideo); + return ret; +} diff --git a/src/VideoCommand.h b/src/VideoCommand.h new file mode 100644 index 00000000..89c37d15 --- /dev/null +++ b/src/VideoCommand.h @@ -0,0 +1,119 @@ +/** + * @file VideoCommand.h + * + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), + * to deal in the 
Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#pragma once +#include +#include +#include +#include "VCL.h" + +#include "RSCommand.h" +#include "ExceptionsCommand.h" + +namespace VDMS { + +// Helper classes for handling various JSON commands. 
+ + class VideoCommand: public RSCommand + { + protected: + void enqueue_operations(VCL::Video& video, const Json::Value& op); + + VCL::Video::Codec string_to_codec(const std::string& codec); + + public: + + VideoCommand(const std::string &cmd_name); + + virtual int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error) = 0; + + virtual bool need_blob(const Json::Value& cmd) { return false; } + }; + + class AddVideo: public VideoCommand + { + const std::string DEFAULT_VIDEO_PATH = "videos/database"; + + std::string _storage_video; + + public: + AddVideo(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + bool need_blob(const Json::Value& cmd) { return true; } + }; + + class UpdateVideo: public VideoCommand + { + public: + UpdateVideo(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + Json::Value construct_responses( + Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); + }; + + class FindVideo: public VideoCommand + { + public: + FindVideo(); + + int construct_protobuf(PMGDQuery& tx, + const Json::Value& root, + const std::string& blob, + int grp_id, + Json::Value& error); + + Json::Value construct_responses( + Json::Value &json_responses, + const Json::Value &json, + protobufs::queryMessage &response, + const std::string &blob); + }; + +}; // namespace VDMS diff --git a/src/defines.h b/src/defines.h new file mode 100644 index 00000000..c5eb62f4 --- /dev/null +++ b/src/defines.h @@ -0,0 +1,89 @@ +/** + * @section LICENSE + * + * The MIT License + * + * @copyright Copyright (c) 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), 
+ * to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#pragma once + +// These defines should be included into *Command.cc files +// (RSCommand.cc, ImageCommand.cc, DescriptorsCommand.cc) + +/* Some conventions: +* Must start with VD: (not VDMS since we have a 16 char limit) +* Tags (for nodes and edges) are all upper case. +* Properties are camel case, where the first word is lower case. 
+*/ + +// General + +#define VDMS_GENERIC_LINK "VD:LINK" + +// Entities + +#define VDMS_EN_BLOB_PATH_PROP "VD:blobPath" + +// Images + +#define VDMS_IM_TAG "VD:IMG" +#define VDMS_IM_EDGE_TAG "VD:IMGLINK" +#define VDMS_IM_PATH_PROP "VD:imgPath" + +// Descriptor Set + +#define VDMS_DESC_SET_TAG "VD:DESCSET" +#define VDMS_DESC_SET_EDGE_TAG "VD:DESCSETLINK" // link between set and desc +#define VDMS_DESC_SET_PATH_PROP "VD:descSetPath" +#define VDMS_DESC_SET_NAME_PROP "VD:name" +#define VDMS_DESC_SET_DIM_PROP "VD:dimensions" + +// Descriptor + +#define VDMS_DESC_TAG "VD:DESC" +#define VDMS_DESC_EDGE_TAG "VD:DESCLINK" +#define VDMS_DESC_LABEL_PROP "VD:label" +#define VDMS_DESC_ID_PROP "VD:descId" + +#define VDMS_DESC_LABEL_TAG "VD:DESCLABEL" +#define VDMS_DESC_LABEL_NAME_PROP "VD:labelName" +#define VDMS_DESC_LABEL_ID_PROP "VD:labelId" + +// Regions + +#define VDMS_ROI_TAG "VD:ROI" +#define VDMS_ROI_EDGE_TAG "VD:ROILINK" +#define VDMS_ROI_IMAGE_EDGE "VD:ROIIMGLINK" + +#define VDMS_ROI_COORD_X_PROP "VD:x1" +#define VDMS_ROI_COORD_Y_PROP "VD:y1" +#define VDMS_ROI_WIDTH_PROP "VD:width" +#define VDMS_ROI_HEIGHT_PROP "VD:height" + +// Videos + +#define VDMS_VID_TAG "VD:VID" +#define VDMS_VID_EDGE "VD:VIDLINK" +#define VDMS_VID_PATH_PROP "VD:videoPath" diff --git a/tests/AddAndFind_query.json b/tests/AddAndFind_query.json index a96e8718..93156985 100644 --- a/tests/AddAndFind_query.json +++ b/tests/AddAndFind_query.json @@ -1,217 +1,242 @@ [ - { - "AddEntity": - { - - "_ref": 1, - "class": "Patient", - - "properties": { - "Email":"rst@rashed.com", - "Age": 86, - "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, - "Name":"Mark", - "fv": {"_blob":"Raghed----ghjhsglfhwa"} - } - } - - }, - { - "AddEntity": - { - "_ref": 2, - "class": "Patient", - - "properties": { - "Email":"gst@raghehed.com1", - "Age": 80, - "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, - "fv":{"_blob" :"Ahmad----ghjhsglfhwa"}, - "Name": "Kai" - - } - } - - }, - { - "Connect": - { - "ref1" : 1, - "ref2" : 
2, - "class": "Married", - "properties":{ - "since" : {"_date":"Sat Sep 1 19:59:24 PDT 1956"}, - "fv": {"_blob":"----ghjhsglfhwa"} - } - } - - }, - { - "AddEntity": - { - - "_ref": 3, - "class": "Patient", - - "properties": { - "Email":"ast@rashed.com", - "Age": 56, - "Birthday":{"_date":"Sat Oct 7 17:59:24 PDT 1946"}, - "Study": false, - "avg": 32.4, + { + "AddEntity": + { + + "_ref": 1, + "class": "Patient", + + "properties": { + "Email":"rst@rashed.com", + "Age": 86, + "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, + "Name":"Mark", + "fv": {"_blob":"Raghed----ghjhsglfhwa"} + } + } + + }, + { + "AddEntity": + { + "_ref": 2, + "class": "Patient", + + "properties": { + "Email":"gst@raghehed.com1", + "Age": 80, + "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, + "fv":{"_blob" :"Ahmad----ghjhsglfhwa"}, + "Name": "Kai" + + } + } + + }, + { + "AddConnection": + { + "ref1" : 1, + "ref2" : 2, + "class": "Married", + "properties":{ + "since" : {"_date":"Sat Sep 1 19:59:24 PDT 1956"}, + "fv": {"_blob":"----ghjhsglfhwa"}, + "city" : "Boston", + "location" : "residence" + } + } + + }, + { + "AddEntity": + { + + "_ref": 3, + "class": "Patient", + + "properties": { + "Email":"ast@rashed.com", + "Age": 56, + "Birthday":{"_date":"Sat Oct 7 17:59:24 PDT 1946"}, + "Study": false, + "avg": 32.4, "Name": "Mathias", "fv": {"_blob":"----ghjhsglfhwa"} - }, - "constraints": - { "Name": ["==", "Mathias"] - } - } - - }, - { - "AddEntity": - { - "_ref": 4, - "class": "Patient", - - "properties": { - "Email":"qst@raghehed.com1", - "Age": 25, - "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, - "Study": true, - "avg": 21.6, - "Name": "Alin", + }, + "constraints": + { "Name": ["==", "Mathias"] + } + } + + }, + { + "AddEntity": + { + "_ref": 4, + "class": "Patient", + + "properties": { + "Email":"qst@raghehed.com1", + "Age": 25, + "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, + "Study": true, + "avg": 21.6, + "Name": "Alin", "fv": {"_blob":"----ghjhsglfhwa"} - } - } + } + } - 
}, - { - "Connect": - { - "ref1" : 3, - "ref2" : 4, - "class": "Married", - "properties":{ - "since" : {"_date":"Sat Sep 1 19:59:24 PDT 1956"} - } - } + }, + { + "AddConnection": + { + "ref1" : 3, + "ref2" : 4, + "class": "Married", + "properties":{ + "since" : {"_date":"Sat Sep 1 19:59:24 PDT 1956"}, + "city" : "Chicago", + "location" : "hotel" + } + } - }, + }, - { - "FindEntity" : { - "_ref" : 15, - "class" : "Patient", - "constraints": { + { + "FindEntity" : { + "_ref" : 15, + "class" : "Patient", + "constraints": { - "Age": [">", 0, "<", 80 ] + "Age": [">", 0, "<", 80 ] - }, + }, - "results": { - "list": ["Name"] - } - } -}, + "results": { + "list": ["Name"] + } + } + }, -{ - "FindEntity" : { - "_ref" : 16, - "class" : "Patient", - "constraints": { + { + "FindEntity" : { + "_ref" : 16, + "class" : "Patient", + "constraints": { - "Age": [">", 0, "<", 80 ] + "Age": [">", 0, "<", 80 ] - }, + }, - "results": { - "list": ["Name"] - } - } -}, + "results": { + "list": ["Name"] + } + } + }, + + { + "FindEntity" : { + "class" : "Patient", + "constraints": { + "Age": [">", 0, "<=", 180 ] + }, + "results": { + "list":["Name","Age"] + } + } + }, + + { + "FindEntity" : { + "class" : "Patient", + "constraints": { + "avg": [">", 1.6], + "Email":[">", "g"] + }, + "link": { + "direction": "any", + "ref": 15 + }, + "results": { -{ - "FindEntity" : { - "class" : "Patient", - "constraints": { - "Age": [">", 0, "<=", 180 ] - }, + "list":["Age","Name", "Email"], + "limit" :3 + + } + } + }, + { + "FindEntity" : { + "class" : "Patient", + "constraints": { + "Email":[">", "g"] + }, + "link": { + "direction": "any", + "ref": 1 + }, "results": { - "list":["Name","Age"] - } - } -}, - - { - "FindEntity" : { - "class" : "Patient", - "constraints": { - "avg": [">", 1.6], - "Email":[">", "g"] - }, - "link": { - "direction": "any", - "ref": 15 - }, - "results": { - - "list":["Age","Name", "Email"], - "limit" :3 - - } - } -}, -{ - "FindEntity" : { - "class" : "Patient", - "constraints": { - 
"Email":[">", "g"] - }, - "link": { - "direction": "any", - "ref": 1 - }, - "results": { - - "list":["Age","Name", "Email"], - "sort":"Email" - } - } -}, - -{ - "FindEntity" : { - "class" : "Patient", - "constraints": { - "Age": [">", 0, "<=", 100 ] - }, - "results": { - "list":["Name","Age","Email"], - "sort" :"Age" - } - } -}, -{ - "FindEntity" : { - "class" : "Patient", - "link": { - "direction": "any", - "ref":16 - }, - "results": { + "list":["Age","Name", "Email"], + "sort":"Email" + } + } + }, + + { + "FindEntity" : { + "class" : "Patient", + "constraints": { + "Age": [">", 0, "<=", 100 ] + }, + "results": { + "list":["Name","Age","Email", "Study"], + "sort" :"Age" + } + } + }, + { + "FindEntity" : { + "class" : "Patient", + + "link": { + "direction": "any", + "ref":16 + }, + "results": { - "list":["Age","Name", "Email", "Birthday"], - "limit":2 + "list":["Age","Name", "Email", "Birthday"], + "limit":2 - } - } -} + } + } + }, + { + "FindConnection" : { + "_ref" : 25, + "class" : "Married", + "constraints": { + "city": ["==", "Boston" ] + }, + "results": { + "list":["location", "city"] + } + } + }, + { + "UpdateConnection" : { + "_ref" : 25, + + "properties": { + "city" : "Atlanta" + } + } + } ] diff --git a/tests/AddFindUpdate.json b/tests/AddFindUpdate.json new file mode 100644 index 00000000..b5d62c3a --- /dev/null +++ b/tests/AddFindUpdate.json @@ -0,0 +1,70 @@ +[ + { + "AddEntity": + { + + "_ref": 1, + "class": "Patient", + + "properties": { + "Email":"rst@rashed.com", + "Age": 86, + "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, + "Name":"Mark", + "fv": {"_blob":"Raghed----ghjhsglfhwa"} + } + } + + }, + { + "AddEntity": + { + "_ref": 2, + "class": "Patient", + + "properties": { + "Email":"gst@raghehed.com1", + "Age": 80, + "Birthday":{"_date":"Sat Oct 1 17:59:24 PDT 1936"}, + "fv":{"_blob" :"Ahmad----ghjhsglfhwa"}, + "Name": "Kai" + + } + } + }, + { + "UpdateEntity" : { + "class" : "Patient", + + "constraints": { + "Name" : [ "==", "Kai" ] + }, + + 
"properties": { + "City" : "Atlanta" + } + } + }, + { + "UpdateEntity" : { + "_ref" : 1, + + "properties": { + "City" : "Houston", + "Name" : "Mark Hammond" + }, + + "remove_props" : [ "fv" ] + } + }, + { + "FindEntity" : { + "constraints" : { + "Age" : [ ">=", 80 ] + }, + "results" : { + "list" : [ "fv" ] + } + } + } +] diff --git a/tests/SConscript b/tests/SConscript index 66425e9e..06707db0 100644 --- a/tests/SConscript +++ b/tests/SConscript @@ -14,9 +14,9 @@ testenv.Replace( ]) testenv.Append( - LIBS = ['gtest'], - LIBPATH = ['../utils/'] - ) + LIBS = ['gtest'], + LIBPATH = ['../utils/'] + ) testenv.MergeFlags(GetOption('cflags')) @@ -29,11 +29,15 @@ query_tests = testenv.Program( '../src/VDMSConfig.o', '../src/RSCommand.o', '../src/ImageCommand.o', + '../src/VideoCommand.o', '../src/ExceptionsCommand.o', '../src/PMGDIterators.o', '../src/PMGDQueryHandler.o', '../src/PMGDQuery.o', '../src/QueryMessage.o', + '../src/DescriptorsManager.o', + '../src/DescriptorsCommand.o', + "../src/BoundingBoxCommand.o", test_sources ], ) diff --git a/tests/cleandbs.sh b/tests/cleandbs.sh index 8ec70c16..c657c3c2 100644 --- a/tests/cleandbs.sh +++ b/tests/cleandbs.sh @@ -1 +1 @@ -rm -r jsongraph qhgraph simpleAdd_db simpleAddx10_db +rm -r jsongraph qhgraph simpleAdd_db simpleAddx10_db simpleUpdate_db diff --git a/tests/config-add10-tests.json b/tests/config-add10-tests.json index d20b8fb3..0fa2bb15 100644 --- a/tests/config-add10-tests.json +++ b/tests/config-add10-tests.json @@ -3,7 +3,7 @@ // Sets database paths and other parameters { // Network - "port": 55555, // Default is 55555 + "port": 55555, // Database paths "pmgd_path": "simpleAddx10_db" diff --git a/tests/config-addfind-tests.json b/tests/config-addfind-tests.json index f53364f5..e243bcec 100644 --- a/tests/config-addfind-tests.json +++ b/tests/config-addfind-tests.json @@ -3,7 +3,7 @@ // Sets database paths and other parameters { // Network - "port": 55555, // Default is 55555 + "port": 55555, // Database paths 
"pmgd_path": "jsongraph" diff --git a/tests/config-tests.json b/tests/config-tests.json index 1ff6b2c5..2d158362 100644 --- a/tests/config-tests.json +++ b/tests/config-tests.json @@ -3,8 +3,10 @@ // Sets database paths and other parameters { // Network - "port": 55555, // Default is 55555 + "port": 55555, // Database paths - "pmgd_path": "simpleAdd_db" + "pmgd_path": "simpleAdd_db", + + "more-info": "github.com/IntelLabs/vdms" } diff --git a/tests/config-update-tests.json b/tests/config-update-tests.json new file mode 100644 index 00000000..8765c8c4 --- /dev/null +++ b/tests/config-update-tests.json @@ -0,0 +1,12 @@ +// VDMS Config File +// This is the run-time config file +// Sets database paths and other parameters +{ + // Network + "port": 55555, + + // Database paths + "pmgd_path": "simpleUpdate_db", + + "more-info": "github.com/IntelLabs/vdms" +} diff --git a/tests/json_queries.cc b/tests/json_queries.cc index 53383eee..21aa819b 100644 --- a/tests/json_queries.cc +++ b/tests/json_queries.cc @@ -100,6 +100,64 @@ TEST(AddImage, simpleAdd) EXPECT_EQ(json_response[0]["AddImage"]["status"].asString(), "0"); VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(UpdateEntity, simpleAddUpdate) +{ + + Json::StyledWriter writer; + + std::ifstream ifile; + int fsize; + char * inBuf; + ifile.open("AddFindUpdate.json", std::ifstream::in); + ifile.seekg(0, std::ios::end); + fsize = (int)ifile.tellg(); + ifile.seekg(0, std::ios::beg); + inBuf = new char[fsize]; + ifile.read(inBuf, fsize); + std::string json_query = std::string(inBuf); + ifile.close(); + delete[] inBuf; + + Json::Reader reader; + Json::Value root; + Json::Value parsed; + + VDMSConfig::init("config-update-tests.json"); + PMGDQueryHandler::init(); + QueryHandler::init(); + + QueryHandler qh_base; + QueryHandlerTester query_handler(qh_base); + + VDMS::protobufs::queryMessage proto_query; + proto_query.set_json(json_query); + VDMS::protobufs::queryMessage response; + + query_handler.pq(proto_query, 
response ); + + reader.parse(response.json().c_str(), parsed); + // std::cout << writer.write(parsed) << std::endl; + + // Verify results returned. + for (int j = 0; j < parsed.size(); j++) { + const Json::Value& query = parsed[j]; + ASSERT_EQ(query.getMemberNames().size(), 1); + std::string cmd = query.getMemberNames()[0]; + + if (cmd == "UpdateEntity") + EXPECT_EQ(query[cmd]["count"].asInt(), 1); + if (cmd == "FindEntity") { + EXPECT_EQ(query[cmd]["returned"].asInt(), 2); + EXPECT_EQ(query["FindEntity"]["entities"][0]["fv"].asString(), + "Missing property"); + } + } + + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(AddImage, simpleAddx10) @@ -151,10 +209,11 @@ TEST(AddImage, simpleAddx10) EXPECT_EQ(json_response[i]["AddImage"]["status"].asString(), "0"); } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } -TEST(QueryHandler, AddAndFind){ - +TEST(QueryHandler, AddAndFind) +{ Json::StyledWriter writer; std::ifstream ifile; @@ -194,7 +253,7 @@ TEST(QueryHandler, AddAndFind){ if (cmd=="AddEntity") in_node_num++; - else if (cmd == "Connect") + else if (cmd == "AddConnection") in_edge_num++; else if (cmd == "FindEntity") { @@ -214,6 +273,12 @@ TEST(QueryHandler, AddAndFind){ } else if (query.isMember("properties")) in_props=query["properties"].size(); + else if (cmd == "FindConnection") + in_query_num++; + else if (cmd == "UpdateConnection") { + count_found_before=true; + in_edge_num++; + } } VDMSConfig::init("config-addfind-tests.json"); @@ -239,18 +304,35 @@ TEST(QueryHandler, AddAndFind){ if (cmd=="AddEntity") out_node_num++; - if (cmd=="Connect") + if (cmd=="AddConnection") + out_edge_num++; + if (cmd == "UpdateConnection") out_edge_num++; - if (cmd =="FindEntity") + if (cmd == "FindEntity" || cmd == "FindConnection") out_query_num++; - if (j == 12) { // Last FindEntiyu + if (j == 11) { // Second Last FindEntity + EXPECT_EQ(query["FindEntity"]["entities"][2]["Study"].asString(), + "Missing property"); + + 
EXPECT_EQ(query["FindEntity"]["entities"][3]["Study"].asString(), + "Missing property"); + } + + if (j == 12) { // Last FindEntiy EXPECT_EQ(query["FindEntity"]["entities"][0]["Birthday"].asString(), "1946-10-07T17:59:24-07:00"); EXPECT_EQ(query["FindEntity"]["entities"][1]["Birthday"].asString(), "1936-10-01T17:59:24-07:00"); } + if (j == 13) { // FindConnection + EXPECT_EQ(query["FindConnection"]["connections"][0]["location"].asString(), + "residence"); + + EXPECT_EQ(query["FindConnection"]["connections"][0]["city"].asString(), + "Boston"); + } if ( query[cmd]["status"] == 0) success++; @@ -282,4 +364,5 @@ TEST(QueryHandler, AddAndFind){ EXPECT_EQ(sum_found_before, sum_found_after); EXPECT_EQ(count_found_before, count_found_after); VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } diff --git a/tests/pmgd_queries.cc b/tests/pmgd_queries.cc index 64a3566d..2ee65abe 100644 --- a/tests/pmgd_queries.cc +++ b/tests/pmgd_queries.cc @@ -47,7 +47,7 @@ using namespace std; #define FEMALE 1 void add_patient(protobufs::Command &cmdadd, int id, string name, int age, - string dob, string email, int sex) + string dob, string email, int sex) { cmdadd.set_cmd_id(protobufs::Command::AddNode); protobufs::AddNode *an = cmdadd.mutable_add_node(); @@ -74,6 +74,10 @@ void add_patient(protobufs::Command &cmdadd, int id, string name, int age, p->set_type(protobufs::Property::IntegerType); p->set_key("Sex"); p->set_int_value(sex); + p = n->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("RemoveViaUpdate"); + p->set_string_value("Random"); } TEST(PMGDQueryHandler, addTest) @@ -95,14 +99,14 @@ TEST(PMGDQueryHandler, addTest) protobufs::Command cmdadd; cmdadd.set_tx_id(txid); add_patient(cmdadd, patientid++, "John Doe", 86, "Sat Nov 1 18:59:24 PDT 1930", - "john.doe@abc.com", MALE); + "john.doe@abc.com", MALE); cmds.push_back(&cmdadd); query_count++; protobufs::Command cmdadd1; cmdadd1.set_tx_id(txid); add_patient(cmdadd1, patientid++, "Jane Doe", 80, "Sat 
Oct 1 17:59:24 PDT 1936", - "jane.doe@abc.com", FEMALE); + "jane.doe@abc.com", FEMALE); cmds.push_back(&cmdadd1); query_count++; @@ -119,20 +123,24 @@ TEST(PMGDQueryHandler, addTest) p->set_type(protobufs::Property::TimeType); p->set_key("Since"); p->set_time_value("Sat Sep 1 19:59:24 PDT 1956"); + p = e->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Status"); + p->set_string_value("Old Adult"); cmds.push_back(&cmdedge1); query_count++; protobufs::Command cmdadd2; cmdadd2.set_tx_id(txid); add_patient(cmdadd2, patientid++, "Alice Crypto", 70, "Sat Nov 1 17:59:24 PDT 1946", - "alice.crypto@xyz.com", FEMALE); + "alice.crypto@xyz.com", FEMALE); cmds.push_back(&cmdadd2); query_count++; protobufs::Command cmdadd3; cmdadd3.set_tx_id(txid); add_patient(cmdadd3, patientid++, "Bob Crypto", 70, "Sat Nov 30 7:59:24 PDT 1946", - "bob.crypto@xyz.com", MALE); + "bob.crypto@xyz.com", MALE); cmds.push_back(&cmdadd3); query_count++; @@ -149,6 +157,10 @@ TEST(PMGDQueryHandler, addTest) p->set_type(protobufs::Property::TimeType); p->set_key("Since"); p->set_time_value("Wed Dec 2 19:59:24 PDT 1970"); + p = e->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Status"); + p->set_string_value("Old Adult"); cmds.push_back(&cmdedge2); query_count++; @@ -158,12 +170,12 @@ TEST(PMGDQueryHandler, addTest) cmds.push_back(&cmdtxcommit); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, false); int nodeids = 1, edgeids = 1; for (int i = 0; i < query_count; ++i) { vector response = responses[i]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << "Unsuccessful TX"; + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << "Unsuccessful TX"; if (it->r_type() == protobufs::NodeID) { long nodeid = it->op_int_value(); EXPECT_EQ(nodeid, nodeids++) << "Unexpected node id"; @@ -176,27 +188,28 @@ 
TEST(PMGDQueryHandler, addTest) } } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } void print_property(const string &key, const protobufs::Property &p) { #ifdef PRINT_PROPERTY switch(p.type()) { - case protobufs::Property::BooleanType: - printf("key: %s, value: %d\n", key.c_str(), p.bool_value()); - break; - case protobufs::Property::IntegerType: - printf("key: %s, value: %ld\n", key.c_str(), p.int_value()); - break; - case protobufs::Property::StringType: - case protobufs::Property::TimeType: - printf("key: %s, value: %s\n", key.c_str(), p.string_value().c_str()); - break; - case protobufs::Property::FloatType: - printf("key: %s, value: %lf\n", key.c_str(), p.float_value()); - break; - default: - printf("Unknown\n"); + case protobufs::Property::BooleanType: + printf("key: %s, value: %d\n", key.c_str(), p.bool_value()); + break; + case protobufs::Property::IntegerType: + printf("key: %s, value: %ld\n", key.c_str(), p.int_value()); + break; + case protobufs::Property::StringType: + case protobufs::Property::TimeType: + printf("key: %s, value: %s\n", key.c_str(), p.string_value().c_str()); + break; + case protobufs::Property::FloatType: + printf("key: %s, value: %lf\n", key.c_str(), p.float_value()); + break; + default: + printf("Unknown\n"); } #endif } @@ -221,10 +234,12 @@ TEST(PMGDQueryHandler, queryTestList) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(-1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Email"); pp->set_op(protobufs::PropertyPredicate::Gt); protobufs::Property *p = pp->mutable_v1(); @@ -232,10 +247,10 @@ 
TEST(PMGDQueryHandler, queryTestList) // I think the key is not required here. p->set_key("Email"); p->set_string_value("j"); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Email"; - key = qn->add_response_keys(); + key = qr->add_response_keys(); *key = "Age"; cmds.push_back(&cmdquery); query_count++; @@ -248,12 +263,12 @@ TEST(PMGDQueryHandler, queryTestList) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount, propcount = 0; - for (int i = 0; i < query_count; ++i) { - vector response = responses[i]; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::List) { auto mymap = it->prop_values(); for(auto m_it : mymap) { @@ -274,6 +289,7 @@ TEST(PMGDQueryHandler, queryTestList) EXPECT_EQ(propcount, 2) << "Not enough properties read"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryTestAverage) @@ -281,7 +297,7 @@ TEST(PMGDQueryHandler, queryTestAverage) VDMSConfig::init("config-pmgd-tests.json"); PMGDQueryHandler::init(); PMGDQueryHandler qh; - + vector cmds; { @@ -296,10 +312,12 @@ TEST(PMGDQueryHandler, queryTestAverage) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(-1); - qn->set_tag("Patient"); - qn->set_r_type(protobufs::Average); - string *key = qn->add_response_keys(); + qc->set_tag("Patient"); + 
qr->set_r_type(protobufs::Average); + string *key = qr->add_response_keys(); *key = "Age"; cmds.push_back(&cmdquery); query_count++; @@ -312,11 +330,11 @@ TEST(PMGDQueryHandler, queryTestAverage) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); for (int i = 0; i < query_count; ++i) { vector response = responses[i]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::Average) { EXPECT_EQ(it->op_float_value(), 76.5) << "Average didn't match expected for four patients' age"; } @@ -324,6 +342,7 @@ TEST(PMGDQueryHandler, queryTestAverage) } } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryTestUnique) @@ -331,7 +350,7 @@ TEST(PMGDQueryHandler, queryTestUnique) VDMSConfig::init("config-pmgd-tests.json"); PMGDQueryHandler::init(); PMGDQueryHandler qh; - + vector cmds; { @@ -348,11 +367,13 @@ TEST(PMGDQueryHandler, queryTestUnique) cmdquery.set_tx_id(txid); cmdquery.set_cmd_grp_id(query_count); protobufs::QueryNode *qn = cmdquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(-1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - qn->set_unique(true); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + qc->set_unique(true); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Email"); pp->set_op(protobufs::PropertyPredicate::Gt); protobufs::Property *p = pp->mutable_v1(); @@ -360,8 +381,8 @@ TEST(PMGDQueryHandler, queryTestUnique) // I think the key is not required here. 
p->set_key("Email"); p->set_string_value("j"); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Email"; cmds.push_back(&cmdquery); query_count++; @@ -375,8 +396,8 @@ TEST(PMGDQueryHandler, queryTestUnique) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); - ASSERT_EQ(responses.size(), 1) << "Expecting an error return situation"; + vector> responses = qh.process_queries(cmds, query_count, true); + EXPECT_EQ(responses.size(), 1) << "Expecting an error return situation"; for (int i = 0; i < responses.size(); ++i) { vector response = responses[i]; for (auto it : response) { @@ -386,6 +407,7 @@ TEST(PMGDQueryHandler, queryTestUnique) } } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryNeighborTestList) @@ -409,10 +431,12 @@ TEST(PMGDQueryHandler, queryNeighborTestList) cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); cmdstartquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Sex"); pp->set_op(protobufs::PropertyPredicate::Eq); protobufs::Property *p = pp->mutable_v1(); @@ -427,6 +451,8 @@ TEST(PMGDQueryHandler, queryNeighborTestList) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); qn = cmdquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(-1); protobufs::LinkInfo *qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -435,11 +461,11 @@ 
TEST(PMGDQueryHandler, queryNeighborTestList) qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_p_op(protobufs::And); - qn->set_tagid(0); - qn->set_unique(false); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qc->set_p_op(protobufs::And); + qc->set_tagid(0); + qc->set_unique(false); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Name"; cmds.push_back(&cmdquery); query_count++; @@ -452,12 +478,12 @@ TEST(PMGDQueryHandler, queryNeighborTestList) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount, propcount = 0; - for (int i = 0; i < query_count; ++i) { - vector response = responses[i]; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::List) { auto mymap = it->prop_values(); for(auto m_it : mymap) { @@ -478,6 +504,7 @@ TEST(PMGDQueryHandler, queryNeighborTestList) EXPECT_EQ(propcount, 1) << "Not enough properties read"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryConditionalNeighborTestList) @@ -501,10 +528,12 @@ TEST(PMGDQueryHandler, queryConditionalNeighborTestList) cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); cmdstartquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + 
protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Sex"); pp->set_op(protobufs::PropertyPredicate::Eq); protobufs::Property *p = pp->mutable_v1(); @@ -519,6 +548,8 @@ TEST(PMGDQueryHandler, queryConditionalNeighborTestList) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); qn = cmdquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(-1); protobufs::LinkInfo *qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -527,9 +558,9 @@ TEST(PMGDQueryHandler, queryConditionalNeighborTestList) qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + pp = qc->add_predicates(); pp->set_key("Age"); pp->set_op(protobufs::PropertyPredicate::Lt); p = pp->mutable_v1(); @@ -538,9 +569,9 @@ TEST(PMGDQueryHandler, queryConditionalNeighborTestList) p->set_key("Age"); p->set_int_value(80); - qn->set_unique(false); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qc->set_unique(false); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Name"; cmds.push_back(&cmdquery); query_count++; @@ -553,12 +584,12 @@ TEST(PMGDQueryHandler, queryConditionalNeighborTestList) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount, propcount = 0; - for (int i = 0; i < query_count; ++i) { - vector response = responses[i]; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::List) { auto 
mymap = it->prop_values(); for(auto m_it : mymap) { @@ -579,6 +610,7 @@ TEST(PMGDQueryHandler, queryConditionalNeighborTestList) EXPECT_EQ(propcount, 1) << "Not enough properties read"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryNeighborTestSum) @@ -602,12 +634,14 @@ TEST(PMGDQueryHandler, queryNeighborTestSum) cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); cmdstartquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); // Set parameters to find the starting node(s) qn->set_identifier(1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - qn->set_unique(false); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + qc->set_unique(false); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Sex"); pp->set_op(protobufs::PropertyPredicate::Eq); protobufs::Property *p = pp->mutable_v1(); @@ -622,6 +656,8 @@ TEST(PMGDQueryHandler, queryNeighborTestSum) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); qn = cmdquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(-1); protobufs::LinkInfo *qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -629,10 +665,10 @@ TEST(PMGDQueryHandler, queryNeighborTestSum) qnb->set_e_tag("Married"); qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::Sum); - string *key = qn->add_response_keys(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::Sum); + string *key = qr->add_response_keys(); *key = "Age"; cmds.push_back(&cmdquery); query_count++; @@ -645,12 +681,12 @@ TEST(PMGDQueryHandler, queryNeighborTestSum) 
cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount, propcount = 0; for (int i = 0; i < query_count; ++i) { vector response = responses[i]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::Sum) { EXPECT_EQ(it->op_int_value(), 150) << "Sum didn't match expected for two patients' age"; } @@ -658,6 +694,7 @@ TEST(PMGDQueryHandler, queryNeighborTestSum) } } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, addConstrainedTest) @@ -681,16 +718,18 @@ TEST(PMGDQueryHandler, addConstrainedTest) cmdadd.set_tx_id(txid); cmdadd.set_cmd_grp_id(query_count); add_patient(cmdadd, patientid, "John Doe", 86, "Sat Nov 1 18:59:24 PDT 1930", - "john.doe@abc.com", MALE); + "john.doe@abc.com", MALE); // Add a test to verify this node doesn't exist protobufs::AddNode *an = cmdadd.mutable_add_node(); protobufs::QueryNode *qn = an->mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(patientid++); // ref for caching in case found. 
- qn->set_tag("Patient"); - qn->set_unique(true); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::NodeID); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_unique(true); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::NodeID); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Email"); pp->set_op(protobufs::PropertyPredicate::Eq); protobufs::Property *p = pp->mutable_v1(); @@ -705,7 +744,7 @@ TEST(PMGDQueryHandler, addConstrainedTest) cmdadd1.set_tx_id(txid); cmdadd1.set_cmd_grp_id(query_count); add_patient(cmdadd1, patientid++, "Janice Doe", 40, "Fri Oct 1 1:59:24 PDT 1976", - "janice.doe@abc.com", FEMALE); + "janice.doe@abc.com", FEMALE); cmds.push_back(&cmdadd1); query_count++; @@ -719,6 +758,10 @@ TEST(PMGDQueryHandler, addConstrainedTest) e->set_src(1); e->set_dst(2); e->set_tag("Daughter"); + p = e->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Status"); + p->set_string_value("Young Adult"); cmds.push_back(&cmdedge1); query_count++; @@ -729,23 +772,24 @@ TEST(PMGDQueryHandler, addConstrainedTest) cmds.push_back(&cmdtxcommit); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, false); // Since PMGD queries always generate one response per command, // we can do the following: protobufs::CommandResponse *resp = responses[0][0]; // TxBegin - ASSERT_EQ(resp->error_code(), protobufs::CommandResponse::Success) << "Unsuccessful TX"; + EXPECT_EQ(resp->error_code(), protobufs::CommandResponse::Success) << "Unsuccessful TX"; resp = responses[1][0]; // Conditional add - ASSERT_EQ(resp->error_code(), protobufs::CommandResponse::Exists) << resp->error_msg(); + EXPECT_EQ(resp->error_code(), protobufs::CommandResponse::Exists) << resp->error_msg(); EXPECT_EQ(resp->op_int_value(), 1) << "Unexpected node id for conditional add"; resp = responses[2][0]; // Regular add - 
ASSERT_EQ(resp->error_code(), protobufs::CommandResponse::Success) << resp->error_msg(); + EXPECT_EQ(resp->error_code(), protobufs::CommandResponse::Success) << resp->error_msg(); EXPECT_EQ(resp->op_int_value(), 5) << "Unexpected node id for add"; resp = responses[3][0]; // Regular add edge - ASSERT_EQ(resp->error_code(), protobufs::CommandResponse::Success) << resp->error_msg(); + EXPECT_EQ(resp->error_code(), protobufs::CommandResponse::Success) << resp->error_msg(); EXPECT_EQ(resp->op_int_value(), 3) << "Unexpected edge id for add"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryNeighborLinksTestList) @@ -769,10 +813,12 @@ TEST(PMGDQueryHandler, queryNeighborLinksTestList) cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); cmdstartquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Sex"); pp->set_op(protobufs::PropertyPredicate::Eq); protobufs::Property *p = pp->mutable_v1(); @@ -787,6 +833,8 @@ TEST(PMGDQueryHandler, queryNeighborLinksTestList) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); qn = cmdquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(2); protobufs::LinkInfo *qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -794,9 +842,9 @@ TEST(PMGDQueryHandler, queryNeighborLinksTestList) qnb->set_e_tag("Married"); qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tagid(0); - qn->set_unique(false); - qn->set_p_op(protobufs::And); + qc->set_tagid(0); + 
qc->set_unique(false); + qc->set_p_op(protobufs::And); cmds.push_back(&cmdquery); query_count++; @@ -804,6 +852,8 @@ TEST(PMGDQueryHandler, queryNeighborLinksTestList) cmdfollquery.set_cmd_id(protobufs::Command::QueryNode); cmdfollquery.set_tx_id(txid); qn = cmdfollquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(-1); qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -811,11 +861,11 @@ TEST(PMGDQueryHandler, queryNeighborLinksTestList) qnb->set_e_tag("Daughter"); qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tagid(0); - qn->set_unique(false); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qc->set_tagid(0); + qc->set_unique(false); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Name"; cmds.push_back(&cmdfollquery); query_count++; @@ -828,12 +878,12 @@ TEST(PMGDQueryHandler, queryNeighborLinksTestList) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount, propcount = 0; - for (int i = 0; i < query_count; ++i) { - vector response = responses[i]; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::List) { auto mymap = it->prop_values(); for(auto m_it : mymap) { @@ -854,6 +904,7 @@ TEST(PMGDQueryHandler, queryNeighborLinksTestList) EXPECT_EQ(propcount, 1) << "Not enough properties read"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) @@ -877,10 +928,12 @@ TEST(PMGDQueryHandler, 
queryNeighborLinksReuseTestList) cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); cmdstartquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Sex"); pp->set_op(protobufs::PropertyPredicate::Eq); protobufs::Property *p = pp->mutable_v1(); @@ -888,8 +941,8 @@ TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) // I think the key is not required here. p->set_key("Sex"); p->set_int_value(FEMALE); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Email"; cmds.push_back(&cmdstartquery); query_count++; @@ -898,6 +951,8 @@ TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); qn = cmdquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(2); protobufs::LinkInfo *qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -905,10 +960,10 @@ TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) qnb->set_e_tag("Married"); qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tagid(0); - qn->set_unique(false); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::Count); + qc->set_tagid(0); + qc->set_unique(false); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::Count); cmds.push_back(&cmdquery); query_count++; @@ -916,6 +971,8 @@ TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) cmdfollquery.set_cmd_id(protobufs::Command::QueryNode); 
cmdfollquery.set_tx_id(txid); qn = cmdfollquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(-1); qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -923,13 +980,13 @@ TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) qnb->set_e_tag("Daughter"); qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tagid(0); - qn->set_unique(false); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::List); - key = qn->add_response_keys(); + qc->set_tagid(0); + qc->set_unique(false); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::List); + key = qr->add_response_keys(); *key = "Name"; - key = qn->add_response_keys(); + key = qr->add_response_keys(); *key = "Email"; cmds.push_back(&cmdfollquery); query_count++; @@ -942,13 +999,13 @@ TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount = 0, propcount = 0; int totnodecount = 0, totpropcount = 0; - for (int i = 0; i < query_count; ++i) { - vector response = responses[i]; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::List) { propcount = 0; auto mymap = it->prop_values(); @@ -977,6 +1034,7 @@ TEST(PMGDQueryHandler, queryNeighborLinksReuseTestList) EXPECT_EQ(totpropcount, 3) << "Not enough total properties read"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) @@ -1000,10 +1058,12 @@ TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) 
cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); cmdstartquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - protobufs::PropertyPredicate *pp = qn->add_predicates(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); pp->set_key("Sex"); pp->set_op(protobufs::PropertyPredicate::Eq); protobufs::Property *p = pp->mutable_v1(); @@ -1011,11 +1071,11 @@ TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) // I think the key is not required here. p->set_key("Sex"); p->set_int_value(FEMALE); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Email"; - qn->set_sort(true); - qn->set_sort_key("Email"); + qr->set_sort(true); + qr->set_sort_key("Email"); cmds.push_back(&cmdstartquery); query_count++; @@ -1023,6 +1083,8 @@ TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); qn = cmdquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(2); protobufs::LinkInfo *qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -1030,10 +1092,10 @@ TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) qnb->set_e_tag("Married"); qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tagid(0); - qn->set_unique(false); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::Count); + qc->set_tagid(0); + qc->set_unique(false); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::Count); cmds.push_back(&cmdquery); query_count++; @@ -1041,6 +1103,8 @@ TEST(PMGDQueryHandler, 
querySortedNeighborLinksReuseTestList) cmdfollquery.set_cmd_id(protobufs::Command::QueryNode); cmdfollquery.set_tx_id(txid); qn = cmdfollquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); qn->set_identifier(-1); qnb = qn->mutable_link(); // Now set parameters for neighbor traversal @@ -1048,13 +1112,13 @@ TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) qnb->set_e_tag("Daughter"); qnb->set_dir(protobufs::LinkInfo::Any); qnb->set_nb_unique(false); - qn->set_tagid(0); - qn->set_unique(false); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::List); - key = qn->add_response_keys(); + qc->set_tagid(0); + qc->set_unique(false); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::List); + key = qr->add_response_keys(); *key = "Name"; - key = qn->add_response_keys(); + key = qr->add_response_keys(); *key = "Email"; cmds.push_back(&cmdfollquery); query_count++; @@ -1067,14 +1131,14 @@ TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount = 0, propcount = 0; int totnodecount = 0, totpropcount = 0; bool firstquery = true; - for (int i = 0; i < query_count; ++i) { - vector response = responses[i]; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::List) { propcount = 0; auto mymap = it->prop_values(); @@ -1107,6 +1171,7 @@ TEST(PMGDQueryHandler, querySortedNeighborLinksReuseTestList) EXPECT_EQ(totpropcount, 3) << "Not enough total properties read"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryTestListLimit) @@ -1129,15 
+1194,17 @@ TEST(PMGDQueryHandler, queryTestListLimit) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(-1); - qn->set_tag("Patient"); - qn->set_p_op(protobufs::And); - qn->set_r_type(protobufs::List); - string *key = qn->add_response_keys(); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); *key = "Email"; - key = qn->add_response_keys(); + key = qr->add_response_keys(); *key = "Age"; - qn->set_limit(4); + qr->set_limit(4); cmds.push_back(&cmdquery); query_count++; @@ -1149,12 +1216,12 @@ TEST(PMGDQueryHandler, queryTestListLimit) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); int nodecount, propcount = 0; - for (int i = 0; i < query_count; ++i) { - vector response = responses[i]; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::List) { auto mymap = it->prop_values(); for(auto m_it : mymap) { @@ -1175,6 +1242,7 @@ TEST(PMGDQueryHandler, queryTestListLimit) EXPECT_EQ(propcount, 2) << "Not enough properties read"; } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } TEST(PMGDQueryHandler, queryTestSortedLimitedAverage) @@ -1197,15 +1265,17 @@ TEST(PMGDQueryHandler, queryTestSortedLimitedAverage) cmdquery.set_cmd_id(protobufs::Command::QueryNode); cmdquery.set_tx_id(txid); protobufs::QueryNode *qn = cmdquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + 
protobufs::ResultInfo *qr = qn->mutable_results(); qn->set_identifier(-1); - qn->set_tag("Patient"); - qn->set_r_type(protobufs::Average); - string *key = qn->add_response_keys(); + qc->set_tag("Patient"); + qr->set_r_type(protobufs::Average); + string *key = qr->add_response_keys(); *key = "Age"; - qn->set_sort(true); - qn->set_sort_key("Email"); + qr->set_sort(true); + qr->set_sort_key("Email"); // Average over 5 patients age is 69.2 - qn->set_limit(3); + qr->set_limit(3); cmds.push_back(&cmdquery); query_count++; @@ -1217,11 +1287,11 @@ TEST(PMGDQueryHandler, queryTestSortedLimitedAverage) cmds.push_back(&cmdtxend); query_count++; - vector> responses = qh.process_queries(cmds, query_count); + vector> responses = qh.process_queries(cmds, query_count, true); for (int i = 0; i < query_count; ++i) { vector response = responses[i]; for (auto it : response) { - ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); if (it->r_type() == protobufs::Average) { EXPECT_EQ(static_cast(it->op_float_value()), 73) << "Average didn't match expected for three middle patients' age"; } @@ -1229,4 +1299,771 @@ TEST(PMGDQueryHandler, queryTestSortedLimitedAverage) } } VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(PMGDQueryHandler, queryUpdateTest) +{ + //printf("Testing PMGD query protobuf handler for list return of neighbors with constraints\n"); + + VDMSConfig::init("config-pmgd-tests.json"); + PMGDQueryHandler::init(); + PMGDQueryHandler qh; + + vector cmds; + { + int txid = 1, query_count = 0; + protobufs::Command cmdtx; + cmdtx.set_cmd_id(protobufs::Command::TxBegin); + cmdtx.set_tx_id(txid); + cmds.push_back(&cmdtx); + query_count++; + + // Set parameters to find the starting node(s) + protobufs::Command cmdstartquery; + cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); + cmdstartquery.set_tx_id(txid); + protobufs::QueryNode *qn = 
cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); + qn->set_identifier(1); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); + pp->set_key("Sex"); + pp->set_op(protobufs::PropertyPredicate::Eq); + protobufs::Property *p = pp->mutable_v1(); + p->set_type(protobufs::Property::IntegerType); + // I think the key is not required here. + p->set_key("Sex"); + p->set_int_value(MALE); + cmds.push_back(&cmdstartquery); + query_count++; + + protobufs::Command cmdupdate; + cmdupdate.set_cmd_id(protobufs::Command::UpdateNode); + cmdupdate.set_tx_id(txid); + protobufs::UpdateNode *un = cmdupdate.mutable_update_node(); + + // The identifier here will be the identifier used for search + // since we are going to update properties of the nodes found + // in the previous search + un->set_identifier(qn->identifier()); + p = un->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Hospital"); + p->set_string_value("Kaiser1"); + p = un->add_properties(); + p->set_type(protobufs::Property::BooleanType); + p->set_key("Treated"); + p->set_bool_value(true); + + // Remove the extra properties + un->add_remove_props("RemoveViaUpdate"); + + cmds.push_back(&cmdupdate); + query_count++; + + // Also make sure the removed property doesn't show up anymore + protobufs::Command cmdcheckquery; + cmdcheckquery.set_cmd_id(protobufs::Command::QueryNode); + cmdcheckquery.set_tx_id(txid); + qn = cmdcheckquery.mutable_query_node(); + qc = qn->mutable_constraints(); + qr = qn->mutable_results(); + qn->set_identifier(-1); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + pp = qc->add_predicates(); + pp->set_key("RemoveViaUpdate"); + pp->set_op(protobufs::PropertyPredicate::Eq); + p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. 
+ p->set_key("RemoveViaUpdate"); + p->set_string_value("Random"); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); + *key = "Email"; + cmds.push_back(&cmdcheckquery); + query_count++; + + // No need to commit in this case. So just end TX + protobufs::Command cmdtxend; + // Commit here doesn't change anything. Just indicates end of TX + cmdtxend.set_cmd_id(protobufs::Command::TxCommit); + cmdtxend.set_tx_id(txid); + cmds.push_back(&cmdtxend); + query_count++; + + vector> responses = qh.process_queries(cmds, query_count, false); + for (int i = 0; i < query_count; ++i) { + vector response = responses[i]; + for (auto it : response) { + ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + if (it->r_type() == protobufs::Count) { + EXPECT_EQ(it->op_int_value(), 2) << "Doesn't match expected count"; + } + if (it->r_type() == protobufs::List) { + EXPECT_EQ(it->op_int_value(), 3) << "Doesn't match expected count for prop match"; + } + //printf("\n"); + } + } + } + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(PMGDQueryHandler, queryUpdateConstraintTest) +{ + //printf("Testing PMGD query protobuf handler for list return of neighbors with constraints\n"); + + VDMSConfig::init("config-pmgd-tests.json"); + PMGDQueryHandler::init(); + PMGDQueryHandler qh; + + vector cmds; + { + int txid = 1, query_count = 0; + protobufs::Command cmdtx; + cmdtx.set_cmd_id(protobufs::Command::TxBegin); + cmdtx.set_tx_id(txid); + cmds.push_back(&cmdtx); + query_count++; + + // Try with constraints inside the update command + protobufs::Command cmdupdate; + cmdupdate.set_cmd_id(protobufs::Command::UpdateNode); + cmdupdate.set_tx_id(txid); + protobufs::UpdateNode *un = cmdupdate.mutable_update_node(); + un->set_identifier(1); + + // Set parameters to find the starting node(s) + protobufs::QueryNode *qn = un->mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = 
qn->mutable_results(); + qn->set_identifier(un->identifier()); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); + pp->set_key("Sex"); + pp->set_op(protobufs::PropertyPredicate::Eq); + protobufs::Property *p = pp->mutable_v1(); + p->set_type(protobufs::Property::IntegerType); + // I think the key is not required here. + p->set_key("Sex"); + p->set_int_value(FEMALE); + + // Set properties to be updated when nodes are found. + p = un->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Hospital"); + p->set_string_value("Kaiser2"); + p = un->add_properties(); + p->set_type(protobufs::Property::BooleanType); + p->set_key("Treated"); + p->set_bool_value(true); + + cmds.push_back(&cmdupdate); + query_count++; + + // No need to commit in this case. So just end TX + protobufs::Command cmdtxend; + // Commit here doesn't change anything. Just indicates end of TX + cmdtxend.set_cmd_id(protobufs::Command::TxCommit); + cmdtxend.set_tx_id(txid); + cmds.push_back(&cmdtxend); + query_count++; + + vector> responses = qh.process_queries(cmds, query_count, false); + for (int i = 0; i < query_count; ++i) { + vector response = responses[i]; + for (auto it : response) { + ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + if (it->r_type() == protobufs::Count) { + EXPECT_EQ(it->op_int_value(), 3) << "Doesn't match expected count"; + } + //printf("\n"); + } + } + } + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(PMGDQueryHandler, queryEdgeTestList) +{ + VDMSConfig::init("config-pmgd-tests.json"); + PMGDQueryHandler::init(); + PMGDQueryHandler qh; + + vector cmds; + + { + int txid = 1, query_count = 0; + protobufs::Command cmdtx; + cmdtx.set_cmd_id(protobufs::Command::TxBegin); + cmdtx.set_tx_id(txid); + cmds.push_back(&cmdtx); + query_count++; + + protobufs::Command cmdquery; + cmdquery.set_cmd_id(protobufs::Command::QueryEdge); + 
cmdquery.set_tx_id(txid); + protobufs::QueryEdge *qn = cmdquery.mutable_query_edge(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); + qn->set_identifier(-1); + qc->set_tag(""); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); + pp->set_key("Status"); + pp->set_op(protobufs::PropertyPredicate::Eq); + protobufs::Property *p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. + p->set_key("Status"); + p->set_string_value("Young Adult"); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); + *key = "Status"; + cmds.push_back(&cmdquery); + query_count++; + + // No need to commit in this case. So just end TX + protobufs::Command cmdtxend; + // Commit here doesn't change anything. Just indicates end of TX + cmdtxend.set_cmd_id(protobufs::Command::TxCommit); + cmdtxend.set_tx_id(txid); + cmds.push_back(&cmdtxend); + query_count++; + + vector> responses = qh.process_queries(cmds, query_count, true); + int edgecount, propcount = 0; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; + for (auto it : response) { + ASSERT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + if (it->r_type() == protobufs::List) { + auto mymap = it->prop_values(); + for(auto m_it : mymap) { + // Assuming string for now + protobufs::PropertyList &p = m_it.second; + edgecount = 0; + for (int i = 0; i < p.values_size(); ++i) { + print_property(m_it.first, p.values(i)); + edgecount++; + } + propcount++; + } + } + //printf("\n"); + } + } + EXPECT_EQ(edgecount, 1) << "Not enough edges found"; + EXPECT_EQ(propcount, 1) << "Not enough properties read"; + } + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(PMGDQueryHandler, queryEdgeTestSortList) +{ + // Way to test the reusable iterator + VDMSConfig::init("config-pmgd-tests.json"); + 
PMGDQueryHandler::init(); + PMGDQueryHandler qh; + + vector cmds; + + { + int txid = 1, query_count = 0; + protobufs::Command cmdtx; + cmdtx.set_cmd_id(protobufs::Command::TxBegin); + cmdtx.set_tx_id(txid); + cmds.push_back(&cmdtx); + query_count++; + + protobufs::Command cmdquery; + cmdquery.set_cmd_id(protobufs::Command::QueryEdge); + cmdquery.set_tx_id(txid); + protobufs::QueryEdge *qn = cmdquery.mutable_query_edge(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); + qn->set_identifier(-1); + qc->set_tag(""); + qc->set_p_op(protobufs::And); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); + *key = "Status"; + key = qr->add_response_keys(); + *key = "Since"; + qr->set_sort(true); + qr->set_sort_key("Status"); + cmds.push_back(&cmdquery); + query_count++; + + // No need to commit in this case. So just end TX + protobufs::Command cmdtxend; + // Commit here doesn't change anything. Just indicates end of TX + cmdtxend.set_cmd_id(protobufs::Command::TxCommit); + cmdtxend.set_tx_id(txid); + cmds.push_back(&cmdtxend); + query_count++; + + vector> responses = qh.process_queries(cmds, query_count, true); + int edgecount, propcount = 0; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; + for (auto it : response) { + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + if (it->r_type() == protobufs::List) { + auto mymap = it->prop_values(); + for(auto m_it : mymap) { + // Assuming string for now + protobufs::PropertyList &p = m_it.second; + edgecount = 0; + for (int i = 0; i < p.values_size(); ++i) { + if (m_it.first == "Status") { + if (i <= 1) + EXPECT_EQ(p.values(i).string_value(), "Old Adult"); + else + EXPECT_EQ(p.values(i).string_value(), "Young Adult"); + } + print_property(m_it.first, p.values(i)); + edgecount++; + } + propcount++; + } + } + //printf("\n"); + } + } + EXPECT_EQ(edgecount, 3) << "Not enough edges found"; 
+ EXPECT_EQ(propcount, 2) << "Not enough properties read"; + } + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(PMGDQueryHandler, queryNodeEdgeTestList) +{ + VDMSConfig::init("config-pmgd-tests.json"); + PMGDQueryHandler::init(); + PMGDQueryHandler qh; + + vector cmds; + + { + int txid = 1, query_count = 0; + protobufs::Command cmdtx; + cmdtx.set_cmd_id(protobufs::Command::TxBegin); + cmdtx.set_tx_id(txid); + cmds.push_back(&cmdtx); + query_count++; + + // Constrain the starting nodes for the edge we want to access + protobufs::Command cmdstartquery; + cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); + cmdstartquery.set_tx_id(txid); + protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = qn->mutable_results(); + qn->set_identifier(1); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); + pp->set_key("Email"); + pp->set_op(protobufs::PropertyPredicate::Eq); + protobufs::Property *p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. + p->set_key("Email"); + p->set_string_value("john.doe@abc.com"); + cmds.push_back(&cmdstartquery); + query_count++; + + protobufs::Command cmdquery; + cmdquery.set_cmd_id(protobufs::Command::QueryEdge); + cmdquery.set_tx_id(txid); + protobufs::QueryEdge *qe = cmdquery.mutable_query_edge(); + qc = qe->mutable_constraints(); + qr = qe->mutable_results(); + qe->set_identifier(-1); + qe->set_src_node_id(1); + qe->set_dest_node_id(-1); + qc->set_tag(""); + qc->set_p_op(protobufs::And); + pp = qc->add_predicates(); + pp->set_key("Status"); + pp->set_op(protobufs::PropertyPredicate::Eq); + p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. 
+ p->set_key("Status"); + p->set_string_value("Old Adult"); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); + *key = "Status"; + cmds.push_back(&cmdquery); + query_count++; + + // No need to commit in this case. So just end TX + protobufs::Command cmdtxend; + // Commit here doesn't change anything. Just indicates end of TX + cmdtxend.set_cmd_id(protobufs::Command::TxCommit); + cmdtxend.set_tx_id(txid); + cmds.push_back(&cmdtxend); + query_count++; + + vector> responses = qh.process_queries(cmds, query_count, true); + int edgecount, propcount = 0; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; + for (auto it : response) { + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + if (it->r_type() == protobufs::List) { + auto mymap = it->prop_values(); + for(auto m_it : mymap) { + // Assuming string for now + protobufs::PropertyList &p = m_it.second; + edgecount = 0; + for (int i = 0; i < p.values_size(); ++i) { + print_property(m_it.first, p.values(i)); + edgecount++; + } + propcount++; + } + } + //printf("\n"); + } + } + EXPECT_EQ(edgecount, 1) << "Not enough edges found"; + EXPECT_EQ(propcount, 1) << "Not enough properties read"; + } + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(PMGDQueryHandler, queryNodeEdgeDestTestList) +{ + VDMSConfig::init("config-pmgd-tests.json"); + PMGDQueryHandler::init(); + PMGDQueryHandler qh; + + vector cmds; + + { + int txid = 1, query_count = 0; + protobufs::Command cmdtx; + cmdtx.set_cmd_id(protobufs::Command::TxBegin); + cmdtx.set_tx_id(txid); + cmds.push_back(&cmdtx); + query_count++; + + // Constrain the starting nodes for the edge we want to access + protobufs::Command cmdstartquery; + cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); + cmdstartquery.set_tx_id(txid); + protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr 
= qn->mutable_results(); + qn->set_identifier(1); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); + pp->set_key("Email"); + pp->set_op(protobufs::PropertyPredicate::Eq); + protobufs::Property *p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. + p->set_key("Email"); + p->set_string_value("john.doe@abc.com"); + cmds.push_back(&cmdstartquery); + query_count++; + + protobufs::Command cmdadd; + cmdadd.set_tx_id(txid); + add_patient(cmdadd, 2, "Jane Foster", 70, "Tue Oct 1 13:59:24 PDT 1946", + "jane.foster@pqr.com", FEMALE); + cmds.push_back(&cmdadd); + query_count++; + + protobufs::Command cmdedge; + cmdedge.set_tx_id(txid); + cmdedge.set_cmd_id(protobufs::Command::AddEdge); + protobufs::AddEdge *ae = cmdedge.mutable_add_edge(); + ae->set_identifier(-1); + protobufs::Edge *e = ae->mutable_edge(); + e->set_src(1); + e->set_dst(2); + e->set_tag("Friend"); + p = e->add_properties(); + p->set_type(protobufs::Property::TimeType); + p->set_key("Since"); + p->set_time_value("Sat Sep 1 19:59:24 PDT 1956"); + p = e->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Status"); + p->set_string_value("Old Adult"); + cmds.push_back(&cmdedge); + query_count++; + + protobufs::Command cmdquery; + cmdquery.set_cmd_id(protobufs::Command::QueryEdge); + cmdquery.set_tx_id(txid); + protobufs::QueryEdge *qe = cmdquery.mutable_query_edge(); + qc = qe->mutable_constraints(); + qr = qe->mutable_results(); + qe->set_identifier(-1); + qe->set_src_node_id(1); + qe->set_dest_node_id(2); + qc->set_tag(""); + qc->set_p_op(protobufs::And); + pp = qc->add_predicates(); + pp->set_key("Status"); + pp->set_op(protobufs::PropertyPredicate::Eq); + p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. 
+ p->set_key("Status"); + p->set_string_value("Old Adult"); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); + *key = "Status"; + cmds.push_back(&cmdquery); + query_count++; + + // No need to commit in this case. So just end TX + protobufs::Command cmdtxend; + // Commit here doesn't change anything. Just indicates end of TX + cmdtxend.set_cmd_id(protobufs::Command::TxCommit); + cmdtxend.set_tx_id(txid); + cmds.push_back(&cmdtxend); + query_count++; + + vector> responses = qh.process_queries(cmds, query_count, false); + int edgecount, propcount = 0; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; + for (auto it : response) { + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + if (it->r_type() == protobufs::List) { + auto mymap = it->prop_values(); + for(auto m_it : mymap) { + // Assuming string for now + protobufs::PropertyList &p = m_it.second; + edgecount = 0; + for (int i = 0; i < p.values_size(); ++i) { + print_property(m_it.first, p.values(i)); + edgecount++; + } + propcount++; + } + } + //printf("\n"); + } + } + EXPECT_EQ(edgecount, 1) << "Not enough edges found"; + EXPECT_EQ(propcount, 1) << "Not enough properties read"; + } + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); +} + +TEST(PMGDQueryHandler, queryUpdateEdge) +{ + VDMSConfig::init("config-pmgd-tests.json"); + PMGDQueryHandler::init(); + PMGDQueryHandler qh; + + vector cmds; + + { + int txid = 1, query_count = 0; + protobufs::Command cmdtx; + cmdtx.set_cmd_id(protobufs::Command::TxBegin); + cmdtx.set_tx_id(txid); + cmds.push_back(&cmdtx); + query_count++; + + // Constrain the starting nodes for the edge we want to access + protobufs::Command cmdstartquery; + cmdstartquery.set_cmd_id(protobufs::Command::QueryNode); + cmdstartquery.set_tx_id(txid); + protobufs::QueryNode *qn = cmdstartquery.mutable_query_node(); + protobufs::Constraints *qc = qn->mutable_constraints(); + protobufs::ResultInfo *qr = 
qn->mutable_results(); + qn->set_identifier(1); + qc->set_tag("Patient"); + qc->set_p_op(protobufs::And); + protobufs::PropertyPredicate *pp = qc->add_predicates(); + pp->set_key("Email"); + pp->set_op(protobufs::PropertyPredicate::Eq); + protobufs::Property *p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. + p->set_key("Email"); + p->set_string_value("john.doe@abc.com"); + cmds.push_back(&cmdstartquery); + query_count++; + + protobufs::Command cmdadd; + cmdadd.set_tx_id(txid); + add_patient(cmdadd, 2, "Jane Foster", 70, "Tue Oct 1 13:59:24 PDT 1946", + "jane.foster@pqr.com", FEMALE); + cmds.push_back(&cmdadd); + query_count++; + + protobufs::Command cmdedge; + cmdedge.set_tx_id(txid); + cmdedge.set_cmd_id(protobufs::Command::AddEdge); + protobufs::AddEdge *ae = cmdedge.mutable_add_edge(); + ae->set_identifier(-1); + protobufs::Edge *e = ae->mutable_edge(); + e->set_src(1); + e->set_dst(2); + e->set_tag("Friend"); + p = e->add_properties(); + p->set_type(protobufs::Property::TimeType); + p->set_key("Since"); + p->set_time_value("Sat Sep 1 19:59:24 PDT 1956"); + p = e->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Status"); + p->set_string_value("Old Adult"); + cmds.push_back(&cmdedge); + query_count++; + + protobufs::Command cmdquery; + cmdquery.set_cmd_id(protobufs::Command::QueryEdge); + cmdquery.set_tx_id(txid); + protobufs::QueryEdge *qe = cmdquery.mutable_query_edge(); + qc = qe->mutable_constraints(); + qr = qe->mutable_results(); + qe->set_identifier(10); + qe->set_src_node_id(1); + qe->set_dest_node_id(2); + qc->set_tag(""); + qc->set_p_op(protobufs::And); + pp = qc->add_predicates(); + pp->set_key("Status"); + pp->set_op(protobufs::PropertyPredicate::Eq); + p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. 
+ p->set_key("Status"); + p->set_string_value("Old Adult"); + qr->set_r_type(protobufs::List); + string *key = qr->add_response_keys(); + *key = "Status"; + cmds.push_back(&cmdquery); + query_count++; + + protobufs::Command cmdupdate; + cmdupdate.set_cmd_id(protobufs::Command::UpdateEdge); + cmdupdate.set_tx_id(txid); + protobufs::UpdateEdge *ue = cmdupdate.mutable_update_edge(); + + // The identifier here will be the identifier used for search + // since we are going to update properties of the edge found + // in the previous search + ue->set_identifier(10); + p = ue->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("StartHospital"); + p->set_string_value("Kaiser1"); + p = ue->add_properties(); + p->set_type(protobufs::Property::StringType); + p->set_key("Status"); + p->set_string_value("Medium Adult"); + + // Remove the extra properties + ue->add_remove_props("Since"); + cmds.push_back(&cmdupdate); + + // Re-query with different properties + protobufs::Command cmdqueryu; + cmdqueryu.set_cmd_id(protobufs::Command::QueryEdge); + cmdqueryu.set_tx_id(txid); + qe = cmdqueryu.mutable_query_edge(); + qc = qe->mutable_constraints(); + qr = qe->mutable_results(); + qe->set_identifier(-1); + qc->set_tag(""); + qc->set_p_op(protobufs::And); + pp = qc->add_predicates(); + pp->set_key("Status"); + pp->set_op(protobufs::PropertyPredicate::Eq); + p = pp->mutable_v1(); + p->set_type(protobufs::Property::StringType); + // I think the key is not required here. + p->set_key("Status"); + p->set_string_value("Medium Adult"); + qr->set_r_type(protobufs::List); + key = qr->add_response_keys(); + *key = "Since"; + key = qr->add_response_keys(); + *key = "StartHospital"; + cmds.push_back(&cmdqueryu); + query_count++; + + protobufs::Command cmdtxend; + // Commit here doesn't change anything. 
Just indicates end of TX + cmdtxend.set_cmd_id(protobufs::Command::TxCommit); + cmdtxend.set_tx_id(txid); + cmds.push_back(&cmdtxend); + query_count++; + + vector> responses = qh.process_queries(cmds, query_count, false); + int edgecount, propcount = 0; + for (int q = 0; q < query_count; ++q) { + vector response = responses[q]; + int qcount = 0; + for (auto it : response) { + EXPECT_EQ(it->error_code(), protobufs::CommandResponse::Success) << it->error_msg(); + if (it->r_type() == protobufs::List) { + if (qcount == 4) { // First query + auto mymap = it->prop_values(); + for(auto m_it : mymap) { + // Assuming string for now + protobufs::PropertyList &p = m_it.second; + edgecount = 0; + for (int i = 0; i < p.values_size(); ++i) { + print_property(m_it.first, p.values(i)); + edgecount++; + } + propcount++; + } + EXPECT_EQ(propcount, 1) << "Not enough properties read"; + propcount = 0; + } + else if (q == 6) { + auto mymap = it->prop_values(); + for(auto m_it : mymap) { + // Assuming string for now + protobufs::PropertyList &p = m_it.second; + edgecount = 0; + for (int i = 0; i < p.values_size(); ++i) { + print_property(m_it.first, p.values(i)); + edgecount++; + } + propcount++; + } + EXPECT_EQ(propcount, 2) << "Not enough properties read"; + propcount = 0; + } + } + if (it->r_type() == protobufs::Count) { + EXPECT_EQ(it->op_int_value(), 1) << "Doesn't match expected update count"; + } + qcount++; + //printf("\n"); + } + } + EXPECT_EQ(edgecount, 1) << "Not enough edges found"; + } + VDMSConfig::destroy(); + PMGDQueryHandler::destroy(); } diff --git a/tests/python/TestBoundingBox.py b/tests/python/TestBoundingBox.py new file mode 100644 index 00000000..f7f2b7ba --- /dev/null +++ b/tests/python/TestBoundingBox.py @@ -0,0 +1,539 @@ +# +# The MIT License +# +# @copyright Copyright (c) 2017 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal 
in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# + +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms + +hostname = "localhost" +port = 55557 + +class TestBoundingBox(unittest.TestCase): + @classmethod + def setUpClass(self): + self.number_of_inserts = 2 + + #Method to insert one bounding box + def insertBoundingBox(self, db, props=None): + + all_queries = [] + bb = {} + + bb_coords = {} + bb_coords["x"] = 10 + bb_coords["y"] = 10 + bb_coords["h"] = 100 + bb_coords["w"] = 100 + bb["rectangle"] = bb_coords + + # adds some prop + if not props is None: + bb["properties"] = props + + query = {} + query["AddBoundingBox"] = bb + + all_queries.append(query) + response, res_arr = db.query(all_queries) + + # Check success + self.assertEqual(response[0]["AddBoundingBox"]["status"], 0) + + def addBoundingBoxwithImage(self, db, numBoxes, imgprops=None): + all_queries = [] + imgs_arr = [] + + fd = open("../test_images/brain.png", 'rb') + imgs_arr.append(fd.read()) + fd.close() + + img = {} + img["properties"] = imgprops + img["format"] 
= "png" + img["_ref"] = 12 + + query = {} + query["AddImage"] = img + + all_queries.append(query) + + basename = imgprops["name"] + "_bb_" + for x in range(0, numBoxes): + bb_coords = {} + bb_coords["x"] = x*10 + bb_coords["y"] = x*10 + bb_coords["h"] = 100 + bb_coords["w"] = 100 + + bbprops = {} + bbprops["name"] = basename + str(x) + bb = {} + bb["properties"] = bbprops + bb["rectangle"] = bb_coords + bb["image"] = 12 + + bb_query = {} + bb_query["AddBoundingBox"] = bb + all_queries.append(bb_query) + + response, res_arr = db.query(all_queries, [imgs_arr]) + + self.assertEqual(len(response), numBoxes + 1) + self.assertEqual(response[0]["AddImage"]["status"], 0) + for i in range(0, numBoxes): + self.assertEqual(response[i+1]["AddBoundingBox"]["status"], 0) + + def test_addBoundingBox(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + for i in range(0, self.number_of_inserts): + bb_coords = {} + bb_coords["x"] = i + bb_coords["y"] = i + bb_coords["h"] = 512 + bb_coords["w"] = 512 + + props = {} + props["name"] = "my_bb_" + str(i) + + bb = {} + bb["properties"] = props + bb["rectangle"] = bb_coords + + query = {} + query["AddBoundingBox"] = bb + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(len(response), self.number_of_inserts) + for i in range(0, self.number_of_inserts): + self.assertEqual(response[i]["AddBoundingBox"]["status"], 0) + + def test_findBoundingBox(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "find_my_bb_" + + for i in range(0, self.number_of_inserts): + props = {} + props["name"] = prefix_name + str(i) + self.insertBoundingBox(db, props=props) + + all_queries = [] + + for i in range(0, self.number_of_inserts): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + results = {} + results["list"] = ["name"] + + bb_params = {} + bb_params["constraints"] = constraints + bb_params["results"] = results + + query = {} + 
query["FindBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) + self.assertEqual(response[1]["FindBoundingBox"]["status"], 0) + self.assertEqual(response[0]["FindBoundingBox"]["entities"][0]["name"], prefix_name + "0") + self.assertEqual(response[1]["FindBoundingBox"]["entities"][0]["name"], prefix_name + "1") + + def test_findBoundingBoxCoordinates(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "find_my_bb_coords_" + + for i in range(0, self.number_of_inserts): + props = {} + props["name"] = prefix_name + str(i) + self.insertBoundingBox(db, props=props) + + all_queries = [] + + for i in range(0, self.number_of_inserts): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + results = {} + results["list"] = ["_coordinates"] + + bb_params = {} + bb_params["constraints"] = constraints + bb_params["results"] = results + + query = {} + query["FindBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + for i in range(0, self.number_of_inserts): + self.assertEqual(response[i]["FindBoundingBox"]["status"], 0) + self.assertEqual(response[i]["FindBoundingBox"]["entities"][0]["_coordinates"]["x"], 10) + self.assertEqual(response[i]["FindBoundingBox"]["entities"][0]["_coordinates"]["y"], 10) + self.assertEqual(response[i]["FindBoundingBox"]["entities"][0]["_coordinates"]["w"], 100) + self.assertEqual(response[i]["FindBoundingBox"]["entities"][0]["_coordinates"]["h"], 100) + + def test_addBoundingBoxWithImage(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + imgs_arr = [] + + fd = open("../test_images/brain.png", 'rb') + imgs_arr.append(fd.read()) + fd.close() + + imgprops = {} + imgprops["name"] = "my_brain" + img_params = {} + img_params["properties"] = imgprops + img_params["format"] = "png" + img_params["_ref"] = 12 + + query = {} 
+ query["AddImage"] = img_params + + all_queries.append(query) + + bb_coords = {} + bb_coords["x"] = 100 + bb_coords["y"] = 100 + bb_coords["h"] = 180 + bb_coords["w"] = 180 + + props = {} + props["name"] = "my_brain_bb" + + bb = {} + bb["properties"] = props + bb["rectangle"] = bb_coords + bb["image"] = 12 + + bb_query = {} + bb_query["AddBoundingBox"] = bb + + all_queries.append(bb_query) + + response, res_arr = db.query(all_queries, [imgs_arr]) + + self.assertEqual(response[0]["AddImage"]["status"], 0) + self.assertEqual(response[1]["AddBoundingBox"]["status"], 0) + + def test_findBoundingBoxesInImage(self): + db = vdms.vdms() + db.connect(hostname, port) + + img_name = "my_brain_multiple" + imgprops = {} + imgprops["name"] = img_name + self.addBoundingBoxwithImage(db, self.number_of_inserts, imgprops) + + all_queries = [] + + constraints = {} + constraints["name"] = ["==", img_name] + img = {} + img["constraints"] = constraints + img["_ref"] = 42 + query = {} + query["FindImage"] = img + all_queries.append(query) + + results = {} + results["list"] = ["_coordinates", "name"] + + bb_params = {} + bb_params["image"] = 42 + bb_params["results"] = results + + query = {} + query["FindBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindImage"]["status"], 0) + self.assertEqual(response[1]["FindBoundingBox"]["status"], 0) + self.assertEqual(response[1]["FindBoundingBox"]["returned"], self.number_of_inserts) + + for i in range(0, self.number_of_inserts): + ind = self.number_of_inserts - i - 1 + self.assertEqual(response[1]["FindBoundingBox"]["entities"][i]["_coordinates"]["x"], 10 * ind) + self.assertEqual(response[1]["FindBoundingBox"]["entities"][i]["_coordinates"]["y"], 10 * ind) + self.assertEqual(response[1]["FindBoundingBox"]["entities"][i]["_coordinates"]["w"], 100) + self.assertEqual(response[1]["FindBoundingBox"]["entities"][i]["_coordinates"]["h"], 100) + 
self.assertEqual(response[1]["FindBoundingBox"]["entities"][i]["name"], "my_brain_multiple_bb_" + str(ind)) + + + def test_findBoundingBoxByCoordinates(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + rect_coords = {} + rect_coords["x"] = 0 + rect_coords["y"] = 0 + rect_coords["w"] = 500 + rect_coords["h"] = 500 + + results = {} + results["list"] = ["name"] + + bb_params = {} + bb_params["rectangle"] = rect_coords + bb_params["results"] = results + + query = {} + query["FindBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) + + def test_findBoundingBoxBlob(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "my_brain_return_" + all_queries = [] + + for i in range(0, self.number_of_inserts): + db = vdms.vdms() + db.connect(hostname, port) + + img_name = prefix_name + str(i) + imgprops = {} + imgprops["name"] = img_name + self.addBoundingBoxwithImage(db, 1, imgprops) + + for i in range(0, self.number_of_inserts): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i) + "_bb_0"] + + results = {} + results["blob"] = True + results["list"] = ["name"] + + img_params = {} + img_params["constraints"] = constraints + img_params["results"] = results + + query = {} + query["FindBoundingBox"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(len(img_array), self.number_of_inserts) + for i in range(0, self.number_of_inserts): + coord = self.number_of_inserts - i - 1 + self.assertEqual(response[i]["FindBoundingBox"]["status"], 0) + self.assertEqual(response[i]["FindBoundingBox"]["entities"][0]["name"], prefix_name + str(i) + "_bb_0") + + def test_findBoundingBoxBlobComplex(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "my_brain_complex_" + all_queries = [] + + for i in range(0, self.number_of_inserts): + db 
= vdms.vdms() + db.connect(hostname, port) + + img_name = prefix_name + str(i) + imgprops = {} + imgprops["name"] = img_name + self.addBoundingBoxwithImage(db, 1, imgprops) + + rect_coords = {} + rect_coords["x"] = 0 + rect_coords["y"] = 0 + rect_coords["w"] = 500 + rect_coords["h"] = 500 + + results = {} + results["blob"] = True + results["list"] = ["name"] + + bb_params = {} + bb_params["rectangle"] = rect_coords + bb_params["results"] = results + + query = {} + query["FindBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) + self.assertTrue(len(img_array) >= self.number_of_inserts) + for i in range(0, self.number_of_inserts): + test = {} + test["name"] = prefix_name + str(i) + "_bb_0" + self.assertIn(test, response[0]["FindBoundingBox"]["entities"]) + + def test_updateBoundingBox(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "update_bb_" + + for i in range(0, self.number_of_inserts): + props = {} + props["name"] = prefix_name + str(i) + self.insertBoundingBox(db, props=props) + + all_queries = [] + + constraints = {} + constraints["name"] = ["==", prefix_name + str(0)] + + props = {} + props["name"] = "updated_bb_0" + + bb_params = {} + bb_params["constraints"] = constraints + bb_params["properties"] = props + + query = {} + query["UpdateBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["UpdateBoundingBox"]["status"], 0) + self.assertEqual(response[0]["UpdateBoundingBox"]["count"], 1) + + all_queries = [] + constraints = {} + constraints["name"] = ["==", "updated_bb_0"] + + results = {} + results["list"] = ["name"] + + bb_params = {} + bb_params["constraints"] = constraints + bb_params["results"] = results + + query = {} + query["FindBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = 
db.query(all_queries) + + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) + self.assertEqual(response[0]["FindBoundingBox"]["entities"][0]["name"], "updated_bb_0") + + def test_updateBoundingBoxCoords(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "update_bb_" + + for i in range(0, self.number_of_inserts): + props = {} + props["name"] = prefix_name + str(i) + self.insertBoundingBox(db, props=props) + + all_queries = [] + + constraints = {} + constraints["name"] = ["==", prefix_name + str(0)] + + rect_coords = {} + rect_coords["x"] = 15 + rect_coords["y"] = 15 + rect_coords["w"] = 75 + rect_coords["h"] = 75 + + bb_params = {} + bb_params["constraints"] = constraints + bb_params["rectangle"] = rect_coords + + query = {} + query["UpdateBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["UpdateBoundingBox"]["status"], 0) + self.assertEqual(response[0]["UpdateBoundingBox"]["count"], 1) + + all_queries = [] + constraints = {} + constraints["name"] = ["==", prefix_name + str(0)] + + results = {} + results["list"] = ["_coordinates"] + + bb_params = {} + bb_params["constraints"] = constraints + bb_params["results"] = results + + query = {} + query["FindBoundingBox"] = bb_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindBoundingBox"]["status"], 0) + self.assertEqual(response[0]["FindBoundingBox"]["entities"][0]["_coordinates"]["x"], 15) + self.assertEqual(response[0]["FindBoundingBox"]["entities"][0]["_coordinates"]["y"], 15) + self.assertEqual(response[0]["FindBoundingBox"]["entities"][0]["_coordinates"]["w"], 75) + self.assertEqual(response[0]["FindBoundingBox"]["entities"][0]["_coordinates"]["h"], 75) diff --git a/tests/python/TestDescriptors.py b/tests/python/TestDescriptors.py new file mode 100644 index 00000000..515928a1 --- /dev/null +++ b/tests/python/TestDescriptors.py 
@@ -0,0 +1,202 @@ +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms # Yeah, baby + +hostname = "localhost" +port = 55557 + +class TestDescriptors(unittest.TestCase): + + def test_addSet(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + descriptor_set = {} + descriptor_set["name"] = "features_xd" + descriptor_set["dimensions"] = 1024*4 + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + # Check success + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + def test_addSetAndDescriptors(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + # Add Set + set_name = "features_128d" + dims = 1024 + descriptor_set = {} + descriptor_set["name"] = set_name + descriptor_set["dimensions"] = dims + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) + + response, img_array = db.query(all_queries) + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + # Add Descriptors + all_queries = [] + descriptor_blob = [] + + x = np.zeros(dims) + x = x.astype('float32') + # print type(x[0]) + # print "size: ", len(x.tobytes())/4 + descriptor_blob.append(x.tobytes()) + + descriptor = {} + descriptor["set"] = set_name + + query = {} + query["AddDescriptor"] = descriptor + + all_queries.append(query) + + response, img_array = db.query(all_queries, [descriptor_blob]) + + # Check success + self.assertEqual(response[0]["AddDescriptor"]["status"], 0) + + def test_addDescriptorsx1000(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + # Add Set + set_name = "features_128dx1000" + dims = 128 + descriptor_set = {} + descriptor_set["name"] = set_name + descriptor_set["dimensions"] = dims + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) 
+ + response, img_array = db.query(all_queries) + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + all_queries = [] + descriptor_blob = [] + + total = 1000; + + for i in range(1,total): + x = np.ones(dims) + x[2] = 2.34 + i*20 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + descriptor = {} + descriptor["set"] = set_name + descriptor["label"] = "classX" + + query = {} + query["AddDescriptor"] = descriptor + + all_queries.append(query) + + response, img_array = db.query(all_queries, [descriptor_blob]) + + # Check success + for x in range(0,total-1): + self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + + + def test_classifyDescriptor(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + # Add Set + set_name = "features_128d_4_classify" + dims = 128 + descriptor_set = {} + descriptor_set["name"] = set_name + descriptor_set["dimensions"] = dims + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) + + response, img_array = db.query(all_queries) + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + all_queries = [] + descriptor_blob = [] + + total = 1000; + + class_counter = -1 + for i in range(0,total-1): + if ((i % 4) == 0): + class_counter += 1 + + x = np.ones(dims) + x[2] = 2.34 + i*20 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + descriptor = {} + descriptor["set"] = set_name + descriptor["label"] = "class" + str(class_counter) + + query = {} + query["AddDescriptor"] = descriptor + + all_queries.append(query) + + response, img_array = db.query(all_queries, [descriptor_blob]) + + # Check success + for x in range(0,total-1): + self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + + + descriptor = {} + descriptor["set"] = set_name + + query = {} + query["ClassifyDescriptor"] = descriptor + + for i in range(2, total//10, 4): + all_queries = [] + descriptor_blob = [] + + x = np.ones(dims) + x[2] = 2.34 + i*20 # 
Calculated to be of class1 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + all_queries.append(query) + + response, img_array = db.query(all_queries, [descriptor_blob]) + + # Check success + self.assertEqual(response[0]["ClassifyDescriptor"]["status"], 0) + self.assertEqual(response[0]["ClassifyDescriptor"] + ["label"], "class" + str(int(i/4))) diff --git a/tests/python/TestEntities.py b/tests/python/TestEntities.py new file mode 100644 index 00000000..4e587be3 --- /dev/null +++ b/tests/python/TestEntities.py @@ -0,0 +1,339 @@ +# +# The MIT License +# +# @copyright Copyright (c) 2017 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# + +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms + +hostname = "localhost" +port = 55557 + +class TestEntities(unittest.TestCase): + + def addEntity(self, thID, results): + + db = vdms.vdms() + db.connect(hostname, port) + + props = {} + props["name"] = "Luis" + props["lastname"] = "Ferro" + props["age"] = 27 + props["threadid"] = thID + + addEntity = {} + addEntity["properties"] = props + addEntity["class"] = "AwesomePeople" + + query = {} + query["AddEntity"] = addEntity + + all_queries = [] + all_queries.append(query) + + response, res_arr = db.query(all_queries) + # print (db.get_last_response_str()) + + try: + self.assertEqual(response[0]["AddEntity"]["status"], 0) + except: + results[thID] = -1 + + results[thID] = 0 + + def findEntity(self, thID, results): + + db = vdms.vdms() + db.connect(hostname, port) + + constraints = {} + constraints["threadid"] = ["==",thID] + + findEntity = {} + findEntity["constraints"] = constraints + findEntity["class"] = "AwesomePeople" + + results = {} + results["list"] = ["name", "lastname", "threadid"] + findEntity["results"] = results + + query = {} + query["FindEntity"] = findEntity + + all_queries = [] + all_queries.append(query) + + response, res_arr = db.query(all_queries) + + try: + + self.assertEqual(response[0]["FindEntity"]["status"], 0) + self.assertEqual(response[0]["FindEntity"]["entities"][0] + ["lastname"], "Ferro") + self.assertEqual(response[0]["FindEntity"]["entities"][0] + ["threadid"], thID) + except: + results[thID] = -1 + + results[thID] = 0 + + def test_runMultipleAdds(self): + + # Test concurrent AddEntities + concurrency = 32 + thread_arr = [] + results = [None] * concurrency + for i in range(0,concurrency): + thread_add = Thread(target=self.addEntity,args=(i, results) ) + thread_add.start() + thread_arr.append(thread_add) + + idx = 0 + error_counter = 0 + for th in thread_arr: + th.join() + if 
(results[idx] == -1): + error_counter += 1 + idx += 1 + + self.assertEqual(error_counter, 0) + + thread_arr = [] + + # Tests concurrent AddEntities and FindEntities (that should exists) + results = [None] * concurrency * 2 + for i in range(0,concurrency): + addidx = concurrency + i + thread_add = Thread(target=self.addEntity,args=(addidx, results) ) + thread_add.start() + thread_arr.append(thread_add) + + thread_find = Thread( + target=self.findEntity,args=(i, results) ) + thread_find.start() + thread_arr.append(thread_find) + + idx = 0 + error_counter = 0 + for th in thread_arr: + th.join(); + if (results[idx] == -1): + error_counter += 1 + + idx += 1 + + self.assertEqual(error_counter, 0) + + def test_addFindEntity(self): + results = [None] * 1 + self.addEntity(0, results); + self.findEntity(0, results); + + def test_addEntityWithLink(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + props = {} + props["name"] = "Luis" + props["lastname"] = "Ferro" + props["age"] = 27 + + addEntity = {} + addEntity["_ref"] = 32 + addEntity["properties"] = props + addEntity["class"] = "AwesomePeople" + + query = {} + query["AddEntity"] = addEntity + + all_queries.append(query) + + props = {} + props["name"] = "Luis" + props["lastname"] = "Bueno" + props["age"] = 27 + + link = {} + link["ref"] = 32 + link["direction"] = "in" + link["class"] = "Friends" + + addEntity = {} + addEntity["properties"] = props + addEntity["class"] = "AwesomePeople" + addEntity["link"] = link + + img_params = {} + + query = {} + query["AddEntity"] = addEntity + + all_queries.append(query) + + response, res_arr = db.query(all_queries) + + self.assertEqual(response[0]["AddEntity"]["status"], 0) + self.assertEqual(response[1]["AddEntity"]["status"], 0) + + def test_FindWithSortKey(self): + + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + number_of_inserts = 10 + + for i in range(0,number_of_inserts): + + props = {} + props["name"] = "entity_" + str(i) + 
props["id"] = i + + entity = {} + entity["properties"] = props + entity["class"] = "Random" + + query = {} + query["AddEntity"] = entity + + all_queries.append(query) + + response, blob_arr = db.query(all_queries) + + self.assertEqual(len(response), number_of_inserts) + for i in range(0, number_of_inserts): + self.assertEqual(response[i]["AddEntity"]["status"], 0) + + all_queries = [] + + results = {} + results["list"] = ["name", "id"] + results["sort"] = "id" + + entity = {} + entity["results"] = results + entity["class"] = "Random" + + query = {} + query["FindEntity"] = entity + + all_queries.append(query) + + response, blob_arr = db.query(all_queries) + + self.assertEqual(response[0]["FindEntity"]["status"], 0) + for i in range(0, number_of_inserts): + self.assertEqual(response[0]["FindEntity"]["entities"][i]["id"], i) + + def test_FindWithSortBlock(self): + + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + number_of_inserts = 10 + + for i in range(0,number_of_inserts): + + props = {} + props["name"] = "entity_" + str(i) + props["id"] = i + + entity = {} + entity["properties"] = props + entity["class"] = "SortBlock" + + query = {} + query["AddEntity"] = entity + + all_queries.append(query) + + response, blob_arr = db.query(all_queries) + + self.assertEqual(len(response), number_of_inserts) + for i in range(0, number_of_inserts): + self.assertEqual(response[i]["AddEntity"]["status"], 0) + + all_queries = [] + + sort = {} + sort["key"] = "id" + sort["order"] = "ascending" + + results = {} + results["list"] = ["name", "id"] + results["sort"] = sort + + entity = {} + entity["results"] = results + entity["class"] = "SortBlock" + + query = {} + query["FindEntity"] = entity + + all_queries.append(query) + + response, blob_arr = db.query(all_queries) + + self.assertEqual(response[0]["FindEntity"]["status"], 0) + for i in range(0, number_of_inserts): + self.assertEqual(response[0]["FindEntity"]["entities"][i]["id"], i) + + all_queries = [] + + sort 
= {} + sort["key"] = "id" + sort["order"] = "descending" + + results = {} + results["list"] = ["name", "id"] + results["sort"] = sort + + entity = {} + entity["results"] = results + entity["class"] = "SortBlock" + + query = {} + query["FindEntity"] = entity + + all_queries.append(query) + + response, blob_arr = db.query(all_queries) + + self.assertEqual(response[0]["FindEntity"]["status"], 0) + for i in range(0, number_of_inserts): + self.assertEqual(response[0]["FindEntity"]["entities"][i]["id"], + number_of_inserts - 1 - i) diff --git a/tests/python/TestEntitiesBlobs.py b/tests/python/TestEntitiesBlobs.py new file mode 100644 index 00000000..0da62fde --- /dev/null +++ b/tests/python/TestEntitiesBlobs.py @@ -0,0 +1,157 @@ +# +# The MIT License +# +# @copyright Copyright (c) 2017 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+# + +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms + +hostname = "localhost" +port = 55557 + +class TestEntitiesBlob(unittest.TestCase): + + def test_addEntityWithBlob(self, thID=0): + + db = vdms.vdms() + db.connect(hostname, port) + + props = {} + props["name"] = "Luis" + props["lastname"] = "Ferro" + props["age"] = 27 + props["threadid"] = thID + + addEntity = {} + addEntity["properties"] = props + addEntity["class"] = "AwesomePeople" + addEntity["blob"] = True + + query = {} + query["AddEntity"] = addEntity + + all_queries = [] + all_queries.append(query) + + blob_arr = [] + fd = open("../test_images/brain.png", 'rb') + blob_arr.append(fd.read()) + fd.close() + + response, res_arr = db.query(all_queries, [blob_arr]) + + self.assertEqual(response[0]["AddEntity"]["status"], 0) + + def test_addEntityWithBlobNoBlob(self, thID=0): + + db = vdms.vdms() + db.connect(hostname, port) + + props = {} + props["name"] = "Luis" + props["lastname"] = "Ferro" + props["age"] = 27 + props["threadid"] = thID + + addEntity = {} + addEntity["properties"] = props + addEntity["class"] = "AwesomePeople" + addEntity["blob"] = True + + query = {} + query["AddEntity"] = addEntity + + all_queries = [] + all_queries.append(query) + + response, res_arr = db.query(all_queries) + + self.assertEqual(response[0]["status"], -1) + self.assertEqual(response[0]["info"], + "Expected blobs: 1. 
Received blobs: 0") + + def test_addEntityWithBlobAndFind(self, thID=0): + + db = vdms.vdms() + db.connect(hostname, port) + + props = {} + props["name"] = "Tom" + props["lastname"] = "Slash" + props["age"] = 27 + props["id"] = 45334 + + addEntity = {} + addEntity["properties"] = props + addEntity["class"] = "NotSoAwesome" + addEntity["blob"] = True + + query = {} + query["AddEntity"] = addEntity + + all_queries = [] + all_queries.append(query) + + blob_arr = [] + fd = open("../test_images/brain.png", 'rb') + blob_arr.append(fd.read()) + fd.close() + + response, res_arr = db.query(all_queries, [blob_arr]) + + self.assertEqual(response[0]["AddEntity"]["status"], 0) + + constraints = {} + constraints["id"] = ["==", 45334] + + results = {} + results["blob"] = True + results["list"] = ["name"] + + FindEntity = {} + FindEntity["constraints"] = constraints + FindEntity["class"] = "NotSoAwesome" + FindEntity["results"] = results + + query = {} + query["FindEntity"] = FindEntity + + all_queries = [] + all_queries.append(query) + + response, res_arr = db.query(all_queries) + + self.assertEqual(response[0]["FindEntity"]["entities"][0]["blob"], True) + + self.assertEqual(len(res_arr), len(blob_arr)) + self.assertEqual(len(res_arr[0]), len(blob_arr[0])) + self.assertEqual((res_arr[0]), (blob_arr[0])) + diff --git a/tests/python/TestFindDescriptors.py b/tests/python/TestFindDescriptors.py new file mode 100644 index 00000000..8758719b --- /dev/null +++ b/tests/python/TestFindDescriptors.py @@ -0,0 +1,441 @@ +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms # Yeah, baby + +hostname = "localhost" +port = 55557 + +class TestFindDescriptors(unittest.TestCase): + + def create_set_and_insert(self, set_name, dims, total): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + # Add Set + descriptor_set = {} + descriptor_set["name"] = set_name + descriptor_set["dimensions"] = 
dims + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) + + response, img_array = db.query(all_queries) + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + all_queries = [] + descriptor_blob = [] + + class_counter = -1 + for i in range(0,total-1): + if ((i % 4) == 0): + class_counter += 1 + + x = np.ones(dims) + x[2] = 2.34 + i*20 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + descriptor = {} + descriptor["set"] = set_name + descriptor["label"] = "class" + str(class_counter) + + props = {} + props["myid"] = i + 200 + descriptor["properties"] = props + + query = {} + query["AddDescriptor"] = descriptor + + all_queries.append(query) + + response, img_array = db.query(all_queries, [descriptor_blob]) + + # Check success + for x in range(0,total-1): + self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + + def test_findDescByConstraints(self): + + # Add Set + set_name = "features_128d_4_findbyConst" + dims = 128 + total = 100 + self.create_set_and_insert(set_name, dims, total) + + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + finddescriptor = {} + finddescriptor["set"] = set_name + + constraints = {} + constraints["myid"] = ["==", 205] + finddescriptor["constraints"] = constraints + + results = {} + results["list"] = ["myid",] + finddescriptor["results"] = results + + query = {} + query["FindDescriptor"] = finddescriptor + + all_queries = [] + all_queries.append(query) + + response, img_array = db.query(all_queries) + + # Check success + self.assertEqual(response[0]["FindDescriptor"]["status"], 0) + self.assertEqual(response[0]["FindDescriptor"]["returned"], 1) + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][0]["myid"], 205) + + + def test_findDescByConst_get_id(self): + + # Add Set + set_name = "features_128d_4_findDescriptors_id" + dims = 128 + total = 100 + self.create_set_and_insert(set_name, dims, total) + + db = vdms.vdms() + 
db.connect(hostname, port) + + all_queries = [] + + finddescriptor = {} + finddescriptor["set"] = set_name + + constraints = {} + constraints["myid"] = ["==", 205] + finddescriptor["constraints"] = constraints + + results = {} + results["list"] = ["myid", "_label", "_id"] + finddescriptor["results"] = results + + query = {} + query["FindDescriptor"] = finddescriptor + + all_queries = [] + all_queries.append(query) + + response, img_array = db.query(all_queries) + + # Check success + self.assertEqual(response[0]["FindDescriptor"]["status"], 0) + self.assertEqual(response[0]["FindDescriptor"]["returned"], 1) + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][0]["myid"], 205) + + def test_findDescByBlob(self): + + # Add Set + set_name = "findwith_blob" + dims = 128 + total = 100 + self.create_set_and_insert(set_name, dims, total) + + db = vdms.vdms() + db.connect(hostname, port) + + kn = 3 + + all_queries = [] + + finddescriptor = {} + finddescriptor["set"] = set_name + + results = {} + results["list"] = ["myid", "_id", "_distance"] + results["blob"] = True + finddescriptor["results"] = results + finddescriptor["k_neighbors"] = kn + + query = {} + query["FindDescriptor"] = finddescriptor + + all_queries = [] + all_queries.append(query) + + descriptor_blob = [] + x = np.ones(dims) + x[2] = 2.34 + 30*20 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + response, blob_array = db.query(all_queries, [descriptor_blob]) + + self.assertEqual(len(blob_array), kn) + self.assertEqual(descriptor_blob[0], blob_array[0]) + + # Check success + self.assertEqual(response[0]["FindDescriptor"]["status"], 0) + self.assertEqual(response[0]["FindDescriptor"]["returned"], kn) + + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][0]["_distance"], 0) + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][1]["_distance"], 400) + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][2]["_distance"], 400) + + def 
test_findDescByBlobNoResults(self): + + # Add Set + set_name = "findwith_blobNoResults" + dims = 128 + total = 100 + self.create_set_and_insert(set_name, dims, total) + + db = vdms.vdms() + db.connect(hostname, port) + + kn = 1 + + all_queries = [] + + finddescriptor = {} + finddescriptor["set"] = set_name + + results = {} + results["blob"] = True + finddescriptor["results"] = results + finddescriptor["k_neighbors"] = kn + + query = {} + query["FindDescriptor"] = finddescriptor + + all_queries = [] + all_queries.append(query) + + descriptor_blob = [] + x = np.ones(dims) + x[2] = 2.34 + 30*20 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + response, blob_array = db.query(all_queries, [descriptor_blob]) + + + # Check success + self.assertEqual(response[0]["FindDescriptor"]["status"], 0) + self.assertEqual(response[0]["FindDescriptor"]["returned"], kn) + self.assertEqual(len(blob_array), kn) + self.assertEqual(descriptor_blob[0], blob_array[0]) + + # def test_findDescByBlobAndConstraints(self): + + # # Add Set + # set_name = "findwith_blob" + # dims = 128 + # total = 100 + # self.create_set_and_insert(set_name, dims, total) + + # db = vdms.vdms() + # db.connect(hostname, port) + + # kn = 3 + + # all_queries = [] + + # finddescriptor = {} + # finddescriptor["set"] = set_name + # finddescriptor["k_neighbors"] = kn + + # results = {} + # results["list"] = ["myid", "_id", "_distance"] + # results["blob"] = True + # finddescriptor["results"] = results + + # constraints = {} + # constraints["myid"] = ["==", 205] + # finddescriptor["constraints"] = constraints + + # query = {} + # query["FindDescriptor"] = finddescriptor + + # all_queries = [] + # all_queries.append(query) + + # descriptor_blob = [] + # x = np.ones(dims) + # x[2] = 2.34 + 30*20 + # x = x.astype('float32') + # descriptor_blob.append(x.tobytes()) + + # response, blob_array = db.query(all_queries, [descriptor_blob]) + + # self.assertEqual(len(blob_array), kn) + # 
self.assertEqual(descriptor_blob[0], blob_array[0]) + + # # Check success + # self.assertEqual(response[0]["FindDescriptor"]["status"], 0) + # self.assertEqual(response[0]["FindDescriptor"]["returned"], kn) + + # self.assertEqual(response[0]["FindDescriptor"] + # ["entities"][0]["_distance"], 0) + # self.assertEqual(response[0]["FindDescriptor"] + # ["entities"][1]["_distance"], 400) + # self.assertEqual(response[0]["FindDescriptor"] + # ["entities"][2]["_distance"], 400) + + + def test_findDescByBlobWithLink(self): + + # Add Set + set_name = "findwith_blob_link" + dims = 128 + total = 100 + + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + # Add Set + descriptor_set = {} + descriptor_set["name"] = set_name + descriptor_set["dimensions"] = dims + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) + + response, img_array = db.query(all_queries) + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + all_queries = [] + descriptor_blob = [] + + class_counter = -1 + for i in range(0,total-1): + if ((i % 4) == 0): + class_counter += 1 + + reference = i + 2 + + x = np.ones(dims) + x[2] = 2.34 + i*20 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + descriptor = {} + descriptor["set"] = set_name + descriptor["label"] = "class" + str(class_counter) + + props = {} + props["myid"] = i + 200 + descriptor["properties"] = props + descriptor["_ref"] = reference + + query = {} + query["AddDescriptor"] = descriptor + + all_queries.append(query) + + props = {} + props["entity_prop"] = i + 200 + + addEntity = {} + addEntity["properties"] = props + addEntity["class"] = "randomentity" + + link = {} + link["ref"] = reference + addEntity["link"] = link + + query = {} + query["AddEntity"] = addEntity + + all_queries.append(query) + + response, img_array = db.query(all_queries, [descriptor_blob]) + + # Check success + for x in range(0,total-1,2): + 
self.assertEqual(response[x]["AddDescriptor"]["status"], 0) + self.assertEqual(response[x+1]["AddEntity"] ["status"], 0) + + kn = 3 + reference = 102 # because I can + + all_queries = [] + + finddescriptor = {} + finddescriptor["set"] = set_name + + results = {} + results["list"] = ["myid", "_id", "_distance"] + results["blob"] = True + finddescriptor["results"] = results + finddescriptor["k_neighbors"] = kn + finddescriptor["_ref"] = reference + + query = {} + query["FindDescriptor"] = finddescriptor + + all_queries.append(query) + + descriptor_blob = [] + x = np.ones(dims) + x[2] = 2.34 + 30*20 + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + results = {} + results["list"] = ["entity_prop"] + + link = {} + link["ref"] = reference + + findEntity = {} + findEntity["results"] = results + findEntity["class"] = "randomentity" + findEntity["link"] = link + + query = {} + query["FindEntity"] = findEntity + + all_queries.append(query) + + response, blob_array = db.query(all_queries, [descriptor_blob]) + + self.assertEqual(len(blob_array), kn) + # This checks that the received blobs is the same as the inserted. 
+ self.assertEqual(descriptor_blob[0], blob_array[0]) + + # Check success + self.assertEqual(response[0]["FindDescriptor"]["status"], 0) + self.assertEqual(response[0]["FindDescriptor"]["returned"], kn) + + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][0]["_distance"], 0) + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][1]["_distance"], 400) + self.assertEqual(response[0]["FindDescriptor"] + ["entities"][2]["_distance"], 400) + + self.assertEqual(response[1]["FindEntity"]["status"], 0) + self.assertEqual(response[1]["FindEntity"]["returned"], kn) + + self.assertEqual(response[1]["FindEntity"] + ["entities"][0]["entity_prop"], 231) + self.assertEqual(response[1]["FindEntity"] + ["entities"][1]["entity_prop"], 230) + self.assertEqual(response[1]["FindEntity"] + ["entities"][2]["entity_prop"], 229) diff --git a/tests/python/TestImages.py b/tests/python/TestImages.py new file mode 100644 index 00000000..d777f50a --- /dev/null +++ b/tests/python/TestImages.py @@ -0,0 +1,411 @@ +# +# The MIT License +# +# @copyright Copyright (c) 2017 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# + +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms + +hostname = "localhost" +port = 55557 + +class TestImages(unittest.TestCase): + + #Methos to insert one image + def insertImage(self, db, props=None, collections=None, format="png"): + + imgs_arr = [] + all_queries = [] + + fd = open("../test_images/brain.png", 'rb') + imgs_arr.append(fd.read()) + fd.close() + + img_params = {} + + # adds some prop + if not props is None: + props["test_case"] = "test_case_prop" + img_params["properties"] = props + + if not collections is None: + img_params["collections"] = collections + + img_params["format"] = format + + query = {} + query["AddImage"] = img_params + + all_queries.append(query) + + response, res_arr = db.query(all_queries, [imgs_arr]) + + # Check success + self.assertEqual(response[0]["AddImage"]["status"], 0) + + def test_addImage(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + imgs_arr = [] + + number_of_inserts = 2 + + for i in range(0,number_of_inserts): + #Read Brain Image + fd = open("../test_images/brain.png", 'rb') + imgs_arr.append(fd.read()) + fd.close() + + op_params_resize = {} + op_params_resize["height"] = 512 + op_params_resize["width"] = 512 + op_params_resize["type"] = "resize" + + props = {} + props["name"] = "brain_" + str(i) + props["doctor"] = "Dr. 
Strange Love" + + img_params = {} + img_params["properties"] = props + img_params["operations"] = [op_params_resize] + img_params["format"] = "png" + + query = {} + query["AddImage"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries, [imgs_arr]) + + self.assertEqual(len(response), number_of_inserts) + for i in range(0, number_of_inserts): + self.assertEqual(response[i]["AddImage"]["status"], 0) + + def test_findEntityImage(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fent_brain_" + + for i in range(0,2): + props = {} + props["name"] = prefix_name + str(i) + self.insertImage(db, props=props) + + all_queries = [] + + for i in range(0,2): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + results = {} + results["list"] = ["name"] + + img_params = {} + img_params["constraints"] = constraints + img_params["results"] = results + img_params["class"] = "VD:IMG" + + query = {} + query["FindEntity"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindEntity"]["status"], 0) + self.assertEqual(response[1]["FindEntity"]["status"], 0) + self.assertEqual(response[0]["FindEntity"]["entities"][0]["name"], prefix_name + "0") + self.assertEqual(response[1]["FindEntity"]["entities"][0]["name"], prefix_name + "1") + + def test_findImage(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fimg_brain_" + + for i in range(0,2): + props = {} + props["name"] = prefix_name + str(i) + self.insertImage(db, props=props) + + all_queries = [] + + for i in range(0,2): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + img_params = {} + img_params["constraints"] = constraints + + + query = {} + query["FindImage"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindImage"]["status"], 0) + 
self.assertEqual(response[1]["FindImage"]["status"], 0) + self.assertEqual(len(img_array), 2) + + def test_findImageResults(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fimg_results_" + + for i in range(0,2): + props = {} + props["name"] = prefix_name + str(i) + self.insertImage(db, props=props) + + all_queries = [] + + for i in range(0,2): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + results = {} + results["list"] = ["name"] + + img_params = {} + img_params["constraints"] = constraints + img_params["results"] = results + + query = {} + query["FindImage"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindImage"]["status"], 0) + self.assertEqual(response[1]["FindImage"]["status"], 0) + self.assertEqual(response[0]["FindImage"]["entities"][0]["name"], prefix_name + "0") + self.assertEqual(response[1]["FindImage"]["entities"][0]["name"], prefix_name + "1") + self.assertEqual(len(img_array), 2) + + def test_addImageWithLink(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + props = {} + props["name"] = "Luis" + props["lastname"] = "Ferro" + props["age"] = 27 + + addEntity = {} + addEntity["_ref"] = 32 + addEntity["properties"] = props + addEntity["class"] = "AwesomePeople" + + query = {} + query["AddEntity"] = addEntity + + all_queries.append(query) + + props = {} + props["name"] = "Luis" + props["lastname"] = "Malo" + props["age"] = 27 + + link = {} + link["ref"] = 32 + link["direction"] = "in" + link["class"] = "Friends" + + addImage = {} + addImage["properties"] = props + addImage["link"] = link + addImage["format"] = "png" + + imgs_arr = [] + + fd = open("../test_images/brain.png", 'rb') + imgs_arr.append(fd.read()) + fd.close() + + img_params = {} + + query = {} + query["AddImage"] = addImage + + all_queries.append(query) + + response, res_arr = db.query(all_queries, [imgs_arr]) + + 
self.assertEqual(response[0]["AddEntity"]["status"], 0) + self.assertEqual(response[1]["AddImage"]["status"], 0) + + def test_findImage_multiple_results(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fimg_brain_multiple" + + number_of_inserts = 4 + for i in range(0,number_of_inserts): + props = {} + props["name"] = prefix_name + self.insertImage(db, props=props) + + constraints = {} + constraints["name"] = ["==", prefix_name] + + results = {} + results["list"] = ["name"] + + img_params = {} + img_params["constraints"] = constraints + + query = {} + query["FindImage"] = img_params + + all_queries = [] + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(len(img_array), number_of_inserts) + self.assertEqual(response[0]["FindImage"]["status"], 0) + self.assertEqual(response[0]["FindImage"]["returned"], number_of_inserts) + + def test_findImageNoBlob(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fimg_no_blob_" + + for i in range(0,2): + props = {} + props["name"] = prefix_name + str(i) + self.insertImage(db, props=props) + + all_queries = [] + + for i in range(0,2): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + results = {} + results["blob"] = False + results["list"] = ["name"] + + img_params = {} + img_params["constraints"] = constraints + img_params["results"] = results + + query = {} + query["FindImage"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindImage"]["status"], 0) + self.assertEqual(response[1]["FindImage"]["status"], 0) + self.assertEqual(len(img_array), 0) + + def test_updateImage(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fimg_update_" + + for i in range(0,2): + props = {} + props["name"] = prefix_name + str(i) + self.insertImage(db, props=props) + + all_queries = [] + + constraints = {} + constraints["name"] = ["==", 
prefix_name + str(0)] + + props = {} + props["name"] = "simg_update_0" + + img_params = {} + img_params["constraints"] = constraints + img_params["properties"] = props + + query = {} + query["UpdateImage"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["UpdateImage"]["count"], 1) + self.assertEqual(len(img_array), 0) + + def ztest_zFindImageWithCollection(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fimg_brain_collection_" + number_of_inserts = 4 + + colls = {} + colls = ["brainScans"] + + for i in range(0,number_of_inserts): + props = {} + props["name"] = prefix_name + str(i) + + self.insertImage(db, props=props, collections=colls) + + all_queries = [] + + for i in range(0,1): + + results = {} + results["list"] = ["name"] + + img_params = {} + img_params["collections"] = ["brainScans"] + img_params["results"] = results + + query = {} + query["FindImage"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindImage"]["status"], 0) + self.assertEqual(len(img_array), number_of_inserts) diff --git a/tests/python/TestRetail.py b/tests/python/TestRetail.py new file mode 100644 index 00000000..b4ef9412 --- /dev/null +++ b/tests/python/TestRetail.py @@ -0,0 +1,256 @@ +# +# The MIT License +# +# @copyright Copyright (c) 2017 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or 
substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# + +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms + +import longquery + +hostname = "localhost" +port = 55557 + +n_cameras = 15 +dim = 1000 +name = "features_vectors_store1" + +class TestEntities(unittest.TestCase): + + def add_descriptor_set(self, name, dim): + + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + descriptor_set = {} + descriptor_set["name"] = name + descriptor_set["dimensions"] = dim + + query = {} + query["AddDescriptorSet"] = descriptor_set + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + # Check success + self.assertEqual(response[0]["AddDescriptorSet"]["status"], 0) + + def build_store(self): + + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + store_ref = 999 + + query = { + "AddEntity" : + { + "_ref" : store_ref, + "class" : "Store", + "constraints" : { "Name" : [ "==", "Walmart" ] }, + "properties" : { + "Address" : "1428 alex way, Hillsboro 97124", + "Name" : "Walmart", + "Type" : "grocerys" + } + } + } + + all_queries.append(query) + + areas_tag = ["ChildrenClothes", + "WomenClothes", + "MenClothes", + "Computers", + "Sport", + "Food", + "ChildrenClothes", + "WomenClothes", + "MenClothes", + "Computers", + "Sport", + "Food", + "ChildrenClothes", + "ChildrenClothes", + "WomenClothes", + "MenClothes", + "Computers", + "Sport", + "Food", + 
"ChildrenClothes" + ] + + for i in range(1,n_cameras+1): + + addCamera = { + "AddEntity" : + { + "_ref": i, + "class" : "Camera", + "constraints" : { "Name" : [ "==", "cam" + str(i) ] }, + "properties" : { + "Name" : "cam" + str(i) + } + } + } + + all_queries.append(addCamera) + + addArea = { + "AddEntity" : + { + "_ref" : n_cameras * 10 + i, + "class" : "Area", + "constraints" : { "Name" : [ "==", "Area" + str(i) ] }, + "properties" : { + "Name" : "Area" + str(i), + "Tag" : areas_tag[i] + } + } + } + + if i == 1: + addArea["AddEntity"]["properties"]["Tag"] = "Entrance" + + if i == n_cameras: + addArea["AddEntity"]["properties"]["Tag"] = "Exit" + + all_queries.append(addArea) + + addConnection = { + "AddConnection" : + { + "class" : "Covers", + "ref1" : i, + "ref2" : n_cameras * 10 + i + } + } + + all_queries.append(addConnection) + + addConnection = { + "AddConnection" : + { + "class" : "Consists_Of", + "ref1" : store_ref, + "ref2" : n_cameras * 10 + i + } + } + + all_queries.append(addConnection) + + response, res_arr = db.query(all_queries) + # print (db.get_last_response_str()) + + self.assertEqual(response[0]["AddEntity"]["status"], 0) + + for i in range(1,n_cameras+1): + self.assertEqual(response[(i-1)*4+1]["AddEntity"]["status"], 0) + self.assertEqual(response[(i-1)*4+2]["AddEntity"]["status"], 0) + self.assertEqual(response[(i-1)*4+3]["AddConnection"]["status"], 0) + self.assertEqual(response[(i-1)*4+4]["AddConnection"]["status"], 0) + + def single(self, thID, db, results): + + # id = "19149ec8-fa0d-4ed0-9cfb-3e0811b75391" + id = "19149ec8-fa0d-4ed0-9cfb-3e0811b" + str(thID) + + all_queries = longquery.queryPerson(id) + + # send one random fv + descriptor_blob = [] + x = np.ones(dim) + x[2] = 2.34 + np.random.random_sample() + x = x.astype('float32') + descriptor_blob.append(x.tobytes()) + + try: + + response, res_arr = db.query(all_queries, [descriptor_blob]) + + for i in range(0, len(response)): + cmd = list(response[i].items())[0][0] + 
self.assertEqual(response[i][cmd]["status"], 0) + + all_queries = longquery.queryVisit(id) + + response, res_arr = db.query(all_queries) + + for i in range(0, len(response)): + cmd = list(response[i].items())[0][0] + self.assertEqual(response[i][cmd]["status"], 0) + + except: + results[thID] = -1 + + results[thID] = 0 + + + def test_concurrent(self): + + self.build_store() + self.add_descriptor_set(name, dim) + + retries = 2 + concurrency = 64 + + db_list = [] + + for i in range(0, concurrency): + db = vdms.vdms() + db.connect(hostname, port) + db_list.append(db) + + results = [None] * concurrency * retries + for ret in range(0,retries): + + thread_arr = [] + for i in range(0,concurrency): + idx = concurrency * ret + i + thread_add = Thread( + target=self.single,args=(idx, db_list[i], results) ) + thread_add.start() + thread_arr.append(thread_add) + + idx = concurrency * ret + error_counter = 0 + for th in thread_arr: + th.join() + if (results[idx] == -1): + error_counter += 1 + idx += 1 + + self.assertEqual(error_counter, 0) diff --git a/tests/python/TestVideos.py b/tests/python/TestVideos.py new file mode 100644 index 00000000..05b62256 --- /dev/null +++ b/tests/python/TestVideos.py @@ -0,0 +1,335 @@ +# +# The MIT License +# +# @copyright Copyright (c) 2017 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# + +from threading import Thread +import sys +import os +import urllib +import time +import json +import unittest +import numpy as np +import vdms + +hostname = "localhost" +port = 55557 + +class TestVideos(unittest.TestCase): + + #Methos to insert one image + def insertVideo(self, db, props=None): + + video_arr = [] + all_queries = [] + + fd = open("../test_videos/Megamind.avi", 'rb') + video_arr.append(fd.read()) + fd.close() + + video_parms = {} + + # adds some prop + if not props is None: + props["test_case"] = "test_case_prop" + video_parms["properties"] = props + + video_parms["codec"] = "h264" + video_parms["container"] = "mp4" + + query = {} + query["AddVideo"] = video_parms + + all_queries.append(query) + + response, res_arr = db.query(all_queries, [video_arr]) + + self.assertEqual(len(response), 1) + self.assertEqual(response[0]["AddVideo"]["status"], 0) + + def test_addVideo(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + video_arr = [] + + number_of_inserts = 2 + + for i in range(0,number_of_inserts): + #Read Brain Image + fd = open("../test_videos/Megamind.avi", 'rb') + video_arr.append(fd.read()) + fd.close() + + op_params_resize = {} + op_params_resize["height"] = 512 + op_params_resize["width"] = 512 + op_params_resize["type"] = "resize" + + props = {} + props["name"] = "video_" + str(i) + props["doctor"] = "Dr. 
Strange Love" + + video_parms = {} + video_parms["properties"] = props + video_parms["codec"] = "h264" + + query = {} + query["AddVideo"] = video_parms + + all_queries.append(query) + + response, obj_array = db.query(all_queries, [video_arr]) + self.assertEqual(len(response), number_of_inserts) + for i in range(0, number_of_inserts): + self.assertEqual(response[i]["AddVideo"]["status"], 0) + + def test_findVideo(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "video_1_" + + number_of_inserts = 2 + + for i in range(0,number_of_inserts): + props = {} + props["name"] = prefix_name + str(i) + self.insertVideo(db, props=props) + + all_queries = [] + + for i in range(0,number_of_inserts): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + video_parms = {} + video_parms["constraints"] = constraints + + query = {} + query["FindVideo"] = video_parms + + all_queries.append(query) + + response, vid_array = db.query(all_queries) + + self.assertEqual(len(response), number_of_inserts) + self.assertEqual(len(vid_array), number_of_inserts) + for i in range(0, number_of_inserts): + self.assertEqual(response[i]["FindVideo"]["status"], 0) + + def test_findVideoResults(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "resvideo_1_" + + number_of_inserts = 2 + + for i in range(0,number_of_inserts): + props = {} + props["name"] = prefix_name + str(i) + self.insertVideo(db, props=props) + + all_queries = [] + + for i in range(0,number_of_inserts): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + results = {} + results["list"] = ["name"] + + video_parms = {} + video_parms["constraints"] = constraints + video_parms["results"] = results + + query = {} + query["FindVideo"] = video_parms + + all_queries.append(query) + + response, vid_array = db.query(all_queries) + + self.assertEqual(len(response), number_of_inserts) + self.assertEqual(len(vid_array), number_of_inserts) + for i in range(0, 
number_of_inserts): + self.assertEqual(response[i]["FindVideo"]["status"], 0) + + def test_addVideoWithLink(self): + db = vdms.vdms() + db.connect(hostname, port) + + all_queries = [] + + props = {} + props["name"] = "Luis" + props["lastname"] = "Ferro" + props["age"] = 27 + + addEntity = {} + addEntity["_ref"] = 32 + addEntity["properties"] = props + addEntity["class"] = "AwPeopleVid" + + query = {} + query["AddEntity"] = addEntity + + all_queries.append(query) + + props = {} + props["name"] = "Luis" + props["lastname"] = "Malo" + props["age"] = 27 + + link = {} + link["ref"] = 32 + link["direction"] = "in" + link["class"] = "Friends" + + addVideo = {} + addVideo["properties"] = props + addVideo["link"] = link + + imgs_arr = [] + + fd = open("../test_videos/Megamind.avi", 'rb') + imgs_arr.append(fd.read()) + fd.close() + + img_params = {} + + query = {} + query["AddVideo"] = addVideo + + all_queries.append(query) + + response, res_arr = db.query(all_queries, [imgs_arr]) + + self.assertEqual(response[0]["AddEntity"]["status"], 0) + self.assertEqual(response[1]["AddVideo"]["status"], 0) + + def test_findVid_multiple_results(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "vid_multiple" + + number_of_inserts = 4 + for i in range(0,number_of_inserts): + props = {} + props["name"] = prefix_name + self.insertVideo(db, props=props) + + constraints = {} + constraints["name"] = ["==", prefix_name] + + results = {} + results["list"] = ["name"] + + img_params = {} + img_params["constraints"] = constraints + + query = {} + query["FindVideo"] = img_params + + all_queries = [] + all_queries.append(query) + + response, vid_arr = db.query(all_queries) + + self.assertEqual(len(vid_arr), number_of_inserts) + self.assertEqual(response[0]["FindVideo"]["status"], 0) + self.assertEqual(response[0]["FindVideo"]["returned"], number_of_inserts) + + def test_findVideoNoBlob(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fvid_no_blob_" + + 
for i in range(0,2): + props = {} + props["name"] = prefix_name + str(i) + self.insertVideo(db, props=props) + + all_queries = [] + + for i in range(0,2): + constraints = {} + constraints["name"] = ["==", prefix_name + str(i)] + + results = {} + results["blob"] = False + results["list"] = ["name"] + + img_params = {} + img_params["constraints"] = constraints + img_params["results"] = results + + query = {} + query["FindVideo"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["FindVideo"]["status"], 0) + self.assertEqual(response[1]["FindVideo"]["status"], 0) + self.assertEqual(len(img_array), 0) + + def test_updateVideo(self): + db = vdms.vdms() + db.connect(hostname, port) + + prefix_name = "fvid_update_" + + for i in range(0,2): + props = {} + props["name"] = prefix_name + str(i) + self.insertVideo(db, props=props) + + all_queries = [] + + constraints = {} + constraints["name"] = ["==", prefix_name + str(0)] + + props = {} + props["name"] = "simg_update_0" + + img_params = {} + img_params["constraints"] = constraints + img_params["properties"] = props + + query = {} + query["UpdateVideo"] = img_params + + all_queries.append(query) + + response, img_array = db.query(all_queries) + + self.assertEqual(response[0]["UpdateVideo"]["count"], 1) + self.assertEqual(len(img_array), 0) diff --git a/tests/python/clean.sh b/tests/python/clean.sh deleted file mode 100644 index fdf270e3..00000000 --- a/tests/python/clean.sh +++ /dev/null @@ -1,6 +0,0 @@ -rm log.log screen.log -rm -r db -mkdir db -mkdir db/images -mkdir db/images/pngs -mkdir db/images/jpgs diff --git a/tests/python/config-tests.json b/tests/python/config-tests.json index 7a21eda2..fb1d3077 100644 --- a/tests/python/config-tests.json +++ b/tests/python/config-tests.json @@ -3,11 +3,8 @@ // Sets database paths and other parameters { // Network - "port": 55557, // Default is 55555 + "port": 55557, + "db_root_path": "test_db", - // Database 
paths - "pmgd_path": "db/test-graph", - "png_path": "db/images/pngs/", - "jpg_path": "db/images/jpgs/", - "tdb_path": "db/images/tiledb/tdb" + "more-info": "github.com/IntelLabs/vdms" } diff --git a/tests/python/longquery.py b/tests/python/longquery.py new file mode 100644 index 00000000..e2305b26 --- /dev/null +++ b/tests/python/longquery.py @@ -0,0 +1,684 @@ + +import os + +def queryPerson(id): + + query = [ { + "AddEntity" : + { + "_ref" : 1, + "class" : "Person", + "properties" : + { + "Id" : id, + "imaginary_node" : 1 + } + } + }, + { + "AddEntity" : + { + "_ref" : 2, + "class" : "BoundingBox", + "properties" : + { + "Height" : "267", + "Id" : id, + "Width" : "117", + "X" : "296", + "Y" : "496" + } + } + }, + { + "AddDescriptor" : + { + "_ref" : 3, + "label" : "Person", + "properties" : + { + "id" : id, + "tag" : "person", + "time_stamp" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "set" : "features_vectors_store1" + } + }, + { + "AddConnection" : + { + "class" : "Has", + "ref1" : 1, + "ref2" : 3 + } + }, + { + "AddConnection" : + { + "class" : "Represents", + "ref1" : 1, + "ref2" : 2 + } + }, + { + "AddConnection" : + { + "class" : "AppearsIn", + "ref1" : 3, + "ref2" : 2 + } + } + ] + + return query + +def queryVisit(id): + + query = [ + { + "AddEntity" : + { + "_ref" : 4, + "class" : "Visit", + "constraints" : + { + "Id" : + [ + "==", + id + ] + }, + "properties" : + { + "Id" : id, + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "starting_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + } + } + }, + { + "FindEntity" : + { + "_ref" : 5, + "class" : "Person", + "constraints" : + { + "Id" : + [ + "==", + id + ] + } + } + }, + { + "AddConnection" : + { + "class" : "visited", + "ref1" : 4, + "ref2" : 5 + } + }, + { + "FindEntity" : + { + "_ref" : 6, + "class" : "Store", + "constraints" : + { + "Name" : + [ + "==", + "Walmart" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "Store_Visit", + "ref1" 
: 4, + "ref2" : 6 + } + }, + { + "FindEntity" : + { + "_ref" : 7, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area15" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area15", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 7 + } + }, + { + "FindEntity" : + { + "_ref" : 8, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area14" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area14", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 8 + } + }, + { + "FindEntity" : + { + "_ref" : 9, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area13" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area13", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 9 + } + }, + { + "FindEntity" : + { + "_ref" : 10, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area12" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area12", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 10 + } + }, + { + "FindEntity" : + { + "_ref" : 11, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area11" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area11", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat 
Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 11 + } + }, + { + "FindEntity" : + { + "_ref" : 12, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area10" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area10", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 12 + } + }, + { + "FindEntity" : + { + "_ref" : 13, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area9" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area9", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 13 + } + }, + { + "FindEntity" : + { + "_ref" : 14, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area8" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area8", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 14 + } + }, + { + "FindEntity" : + { + "_ref" : 15, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area7" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area7", + "ending_time" : + { + + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + + "passing_time" : + { + + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 15 + } + }, + { + "FindEntity" : + { + "_ref" : 16, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area6" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area6", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 
PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 16 + } + }, + { + "FindEntity" : + { + "_ref" : 17, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area5" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area5", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 17 + } + }, + { + "FindEntity" : + { + "_ref" : 18, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area4" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area4", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 18 + } + }, + { + "FindEntity" : + { + "_ref" : 19, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area3" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area3", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 19 + } + }, + { + "FindEntity" : + { + "_ref" : 20, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area2" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area2", + "ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 20 + } + }, + { + "FindEntity" : + { + "_ref" : 21, + "class" : "Area", + "constraints" : + { + "Name" : + [ + "==", + "Area1" + ] + } + } + }, + { + "AddConnection" : + { + "class" : "PassBy", + "properties" : + { + "Area" : "Area1", + 
"ending_time" : + { + "_date" : "Sat Jan 06 23:03:00 PDT 2018" + }, + "passing_time" : + { + "_date" : "Sat Jan 06 23:00:00 PST 83186920" + } + }, + "ref1" : 4, + "ref2" : 21 + } + } + ] + + return query diff --git a/tests/python/main.py b/tests/python/main.py index 4cc4a8a9..87941319 100644 --- a/tests/python/main.py +++ b/tests/python/main.py @@ -34,424 +34,5 @@ import numpy as np import vdms -hostname = "localhost" -port = 55557 - -class TestMultiClient(unittest.TestCase): - - def addEntity(self, thID=0): - - db = vdms.VDMS() - db.connect(hostname, port) - - props = {} - props["name"] = "Luis" - props["lastname"] = "Ferro" - props["age"] = 27 - props["threadid"] = thID - - addEntity = {} - addEntity["properties"] = props - addEntity["class"] = "AwesomePeople" - - query = {} - query["AddEntity"] = addEntity - - all_queries = [] - all_queries.append(query) - - response, res_arr = db.query(all_queries) - response = json.loads(response) - - self.assertEqual(response[0]["AddEntity"]["status"], 0) - - def findEntity(self, thID): - - db = vdms.VDMS() - db.connect(hostname, port) - - constraints = {} - constraints["threadid"] = ["==",thID] - - findEntity = {} - findEntity["constraints"] = constraints - findEntity["class"] = "AwesomePeople" - - results = {} - results["list"] = ["name", "lastname", "threadid"] - findEntity["results"] = results - - query = {} - query["FindEntity"] = findEntity - - all_queries = [] - all_queries.append(query) - - response, res_arr = db.query(all_queries) - response = json.loads(response) - # print vdms.aux_print_json(response) - - self.assertEqual(response[0]["FindEntity"]["status"], 0) - self.assertEqual(response[0]["FindEntity"]["entities"][0] - ["lastname"], "Ferro") - self.assertEqual(response[0]["FindEntity"]["entities"][0] - ["threadid"], thID) - - def test_runMultipleAdds(self): - - simultaneous = 1000; - thread_arr = [] - for i in range(1,simultaneous): - thread_add = Thread(target=self.addEntity,args=(i,) ) - thread_add.start() - 
thread_arr.append(thread_add) - - for i in range(1,simultaneous): - thread_find = Thread(target=self.findEntity,args=(i,) ) - thread_find.start() - thread_arr.append(thread_find) - - for th in thread_arr: - th.join(); - - def test_addFindEntity(self): - self.addEntity(9000); - self.findEntity(9000); - - def test_addEntityWithLink(self): - db = vdms.VDMS() - db.connect(hostname, port) - - all_queries = [] - - props = {} - props["name"] = "Luis" - props["lastname"] = "Ferro" - props["age"] = 27 - - addEntity = {} - addEntity["_ref"] = 32 - addEntity["properties"] = props - addEntity["class"] = "AwesomePeople" - - query = {} - query["AddEntity"] = addEntity - - all_queries.append(query) - - props = {} - props["name"] = "Luis" - props["lastname"] = "Bueno" - props["age"] = 27 - - link = {} - link["ref"] = 32 - link["direction"] = "in" - link["class"] = "Friends" - - addEntity = {} - addEntity["properties"] = props - addEntity["class"] = "AwesomePeople" - addEntity["link"] = link - - img_params = {} - - query = {} - query["AddEntity"] = addEntity - - all_queries.append(query) - - # print json.dumps(all_queries) - # vdms.aux_print_json(all_queries) - - response, res_arr = db.query(all_queries) - response = json.loads(response) - # vdms.aux_print_json(response) - - self.assertEqual(response[0]["AddEntity"]["status"], 0) - self.assertEqual(response[1]["AddEntity"]["status"], 0) - -class TestAddImage(unittest.TestCase): - - #Methos to insert one image - def insertImage(self, db, props=None, collections=None, format="png"): - - imgs_arr = [] - all_queries = [] - - fd = open("../test_images/brain.png") - imgs_arr.append(fd.read()) - - img_params = {} - - # adds some prop - if not props is None: - props["test_case"] = "test_case_prop" - img_params["properties"] = props - - if not collections is None: - img_params["collections"] = collections - - img_params["format"] = format - - query = {} - query["AddImage"] = img_params - - all_queries.append(query) - - response, res_arr = 
db.query(all_queries, [imgs_arr]) - - # Check success - response = json.loads(response) - self.assertEqual(response[0]["AddImage"]["status"], 0) - - def test_addImage(self): - db = vdms.VDMS() - db.connect(hostname, port) - - all_queries = [] - imgs_arr = [] - - number_of_inserts = 2 - - for i in range(0,number_of_inserts): - #Read Brain Image - fd = open("../test_images/brain.png") - imgs_arr.append(fd.read()) - - op_params_resize = {} - op_params_resize["height"] = 512 - op_params_resize["width"] = 512 - op_params_resize["type"] = "resize" - - props = {} - props["name"] = "brain_" + str(i) - props["doctor"] = "Dr. Strange Love" - - img_params = {} - img_params["properties"] = props - img_params["operations"] = [op_params_resize] - img_params["format"] = "png" - - query = {} - query["AddImage"] = img_params - - all_queries.append(query) - - response, img_array = db.query(all_queries, [imgs_arr]) - - response = json.loads(response) - self.assertEqual(len(response), number_of_inserts) - for i in range(0, number_of_inserts): - self.assertEqual(response[i]["AddImage"]["status"], 0) - - def test_findEntityImage(self): - db = vdms.VDMS() - db.connect(hostname, port) - - prefix_name = "fent_brain_" - - for i in range(0,2): - props = {} - props["name"] = prefix_name + str(i) - self.insertImage(db, props=props) - - all_queries = [] - - for i in range(0,2): - constraints = {} - constraints["name"] = ["==", prefix_name + str(i)] - - results = {} - results["list"] = ["name"] - - img_params = {} - img_params["constraints"] = constraints - img_params["results"] = results - img_params["class"] = "AT:IMAGE" - - query = {} - query["FindEntity"] = img_params - - all_queries.append(query) - - response, img_array = db.query(all_queries) - # print vdms.aux_print_json(response) - - response = json.loads(response) - self.assertEqual(response[0]["FindEntity"]["status"], 0) - self.assertEqual(response[1]["FindEntity"]["status"], 0) - 
self.assertEqual(response[0]["FindEntity"]["entities"][0]["name"], prefix_name + "0") - self.assertEqual(response[1]["FindEntity"]["entities"][0]["name"], prefix_name + "1") - - def test_findImage(self): - db = vdms.VDMS() - db.connect(hostname, port) - - prefix_name = "fimg_brain_" - - for i in range(0,2): - props = {} - props["name"] = prefix_name + str(i) - self.insertImage(db, props=props) - - all_queries = [] - - for i in range(0,2): - constraints = {} - constraints["name"] = ["==", prefix_name + str(i)] - - img_params = {} - img_params["constraints"] = constraints - - - query = {} - query["FindImage"] = img_params - - all_queries.append(query) - - response, img_array = db.query(all_queries) - # print vdms.aux_print_json(response) - - response = json.loads(response) - self.assertEqual(response[0]["FindImage"]["status"], 0) - self.assertEqual(response[1]["FindImage"]["status"], 0) - # self.assertEqual(response[0]["FindImage"]["entities"][0]["name"], prefix_name + "0") - # self.assertEqual(response[1]["FindImage"]["entities"][0]["name"], prefix_name + "1") - self.assertEqual(len(img_array), 2) - - def test_addImageWithLink(self): - db = vdms.VDMS() - db.connect(hostname, port) - - all_queries = [] - - props = {} - props["name"] = "Luis" - props["lastname"] = "Ferro" - props["age"] = 27 - - addEntity = {} - addEntity["_ref"] = 32 - addEntity["properties"] = props - addEntity["class"] = "AwesomePeople" - - query = {} - query["AddEntity"] = addEntity - - all_queries.append(query) - - props = {} - props["name"] = "Luis" - props["lastname"] = "Malo" - props["age"] = 27 - - link = {} - link["ref"] = 32 - link["direction"] = "in" - link["class"] = "Friends" - - addImage = {} - addImage["properties"] = props - addImage["link"] = link - - imgs_arr = [] - - fd = open("../test_images/brain.png") - imgs_arr.append(fd.read()) - - img_params = {} - - query = {} - query["AddImage"] = addImage - - all_queries.append(query) - - # print json.dumps(all_queries) - # 
vdms.aux_print_json(all_queries) - - response, res_arr = db.query(all_queries, [imgs_arr]) - response = json.loads(response) - # vdms.aux_print_json(response) - - self.assertEqual(response[0]["AddEntity"]["status"], 0) - self.assertEqual(response[1]["AddImage"]["status"], 0) - - def test_findImage_multiple_res(self): - db = vdms.VDMS() - db.connect(hostname, port) - - prefix_name = "fimg_brain_multiple" - - number_of_inserts = 4 - for i in range(0,number_of_inserts): - props = {} - props["name"] = prefix_name - self.insertImage(db, props=props) - - constraints = {} - constraints["name"] = ["==", prefix_name] - - results = {} - results["list"] = ["name"] - - img_params = {} - img_params["constraints"] = constraints - - query = {} - query["FindImage"] = img_params - - all_queries = [] - all_queries.append(query) - - response, img_array = db.query(all_queries) - # print vdms.aux_print_json(response) - - response = json.loads(response) - self.assertEqual(len(img_array), number_of_inserts) - self.assertEqual(response[0]["FindImage"]["status"], 0) - self.assertEqual(response[0]["FindImage"]["returned"], number_of_inserts) - - # This test is failing. 
- def ztest_zFindImageWithCollection(self): - db = vdms.VDMS() - db.connect(hostname, port) - - prefix_name = "fimg_brain_collection_" - number_of_inserts = 4 - - colls = {} - colls = ["brainScans"] - - for i in range(0,number_of_inserts): - props = {} - props["name"] = prefix_name + str(i) - - self.insertImage(db, props=props, collections=colls) - - all_queries = [] - - for i in range(0,1): - - results = {} - results["list"] = ["name"] - - img_params = {} - img_params["collections"] = ["brainScans"] - img_params["results"] = results - - query = {} - query["FindImage"] = img_params - - all_queries.append(query) - - response, img_array = db.query(all_queries) - # print vdms.aux_print_json(response) - - response = json.loads(response) - self.assertEqual(response[0]["FindImage"]["status"], 0) - self.assertEqual(len(img_array), number_of_inserts) - - if __name__ == '__main__': unittest.main() diff --git a/tests/python/run_python_tests.sh b/tests/python/run_python_tests.sh index 43700bcd..bef45ae0 100644 --- a/tests/python/run_python_tests.sh +++ b/tests/python/run_python_tests.sh @@ -1,7 +1,8 @@ -sh clean.sh +rm log.log screen.log +rm -r test_db ../../vdms -cfg config-tests.json > screen.log 2> log.log & -python main.py -v +python -m unittest discover --pattern=*.py -v sleep 1 pkill vdms diff --git a/tests/test_videos/Megamind.avi b/tests/test_videos/Megamind.avi new file mode 100644 index 00000000..86351eb0 Binary files /dev/null and b/tests/test_videos/Megamind.avi differ diff --git a/utils/SConscript b/utils/SConscript index eebf1d82..6c33c145 100644 --- a/utils/SConscript +++ b/utils/SConscript @@ -1,9 +1,9 @@ Import('env') def compileProtoFiles(utils_env): - #Compile .proto file to generate protobuf files (.h and .cc). + #Compile .proto file to generate protobuf files (.h and .cc). 
- protoQuery = utils_env.Command ( + protoQuery = utils_env.Command ( ['include/protobuf/queryMessage.pb.h', 'src/protobuf/queryMessage.pb.cc', '../client/python/vdms/queryMessage_pb2.py', @@ -16,7 +16,7 @@ def compileProtoFiles(utils_env): utils/include/protobuf/queryMessage.pb.h' ) - protoPMGD = utils_env.Command ( + protoPMGD = utils_env.Command ( ['include/protobuf/pmgdMessages.pb.h', 'src/protobuf/pmgdMessages.pb.cc', ], # TARGET @@ -28,7 +28,7 @@ def compileProtoFiles(utils_env): ) def createAPISchemaString(utils_env): - api_schema = utils_env.Command ( + api_schema = utils_env.Command ( 'include/api_schema/APISchema.h', # $TARGET ['src/api_schema/api_schema.json', 'src/api_schema/createApiString.py'], # $SOURCE @@ -60,14 +60,13 @@ chrono_cc = ['src/chrono/Chrono.cc'] utils_source_files = [comm_cc, protobuf_cc, chrono_cc] utils_env.ParseConfig('pkg-config --cflags --libs protobuf') -utils_env.SharedLibrary('vdms-utils', utils_source_files) +ulib = utils_env.SharedLibrary('vdms-utils', utils_source_files) # Comm Testing comm_test_env = Environment(CPPPATH=['include/comm'], - CXXFLAGS="-std=c++11") + CXXFLAGS="-std=c++11", + LIBS = [ulib, 'pthread', 'gtest'] + ) comm_test_source_files = "test/comm/UnitTests.cc"; -comm_test_env.Program('test/comm/comm_test', comm_test_source_files, - LIBS = ['vdms-utils', 'pthread', 'gtest'], - LIBPATH = ['.'] - ) +comm_test = comm_test_env.Program('test/comm/comm_test', comm_test_source_files) diff --git a/utils/src/api_schema/api_schema.json b/utils/src/api_schema/api_schema.json index 4f9abef6..ef33303c 100644 --- a/utils/src/api_schema/api_schema.json +++ b/utils/src/api_schema/api_schema.json @@ -35,10 +35,29 @@ "type": "object", "anyOf": [ { "$ref": "#/definitions/AddEntityTop" }, + { "$ref": "#/definitions/UpdateEntityTop" }, { "$ref": "#/definitions/FindEntityTop" }, - { "$ref": "#/definitions/ConnectTop" }, + + { "$ref": "#/definitions/AddConnectionTop" }, + { "$ref": "#/definitions/UpdateConnectionTop" }, + { "$ref": 
"#/definitions/FindConnectionTop" }, + { "$ref": "#/definitions/AddImageTop" }, - { "$ref": "#/definitions/FindImageTop" } + { "$ref": "#/definitions/UpdateImageTop" }, + { "$ref": "#/definitions/FindImageTop" }, + + { "$ref": "#/definitions/AddDescriptorSetTop" }, + { "$ref": "#/definitions/AddDescriptorTop" }, + { "$ref": "#/definitions/ClassifyDescriptorTop" }, + { "$ref": "#/definitions/FindDescriptorTop" }, + + { "$ref": "#/definitions/AddBoundingBoxTop" }, + { "$ref": "#/definitions/UpdateBoundingBoxTop" }, + { "$ref": "#/definitions/FindBoundingBoxTop" }, + + { "$ref": "#/definitions/AddVideoTop" }, + { "$ref": "#/definitions/UpdateVideoTop" }, + { "$ref": "#/definitions/FindVideoTop" } ] }, "uniqueItems": false, @@ -57,28 +76,73 @@ "minimum": 0 }, + "positiveDouble": { + "type": "double", + "minimum": 0.0 + }, + "stringArray": { "type": "array", "items": {"type": "string"}, "minimum": 1 }, + "blob": { + "type": "boolean" + }, + "refInt": { "type": "integer", "minimum": 1, "maximun": 10000 }, - "formatString": { + "imgFormatString": { "type": "string", "enum": ["png", "jpg"] }, + "vidCodecString": { + "type": "string", + "enum": ["xvid", "h264", "h263"] + }, + + "vidContainerString": { + "type": "string", + "enum": ["mp4", "avi", "mov"] + }, + + "unitString": { + "type": "string", + "enum": ["frames", "seconds"] + }, + + "edgeDirectionString": { "type": "string", "enum": ["in", "out", "any"] }, + "sortOrder": { + "type": "string", + "enum": ["ascending", "descending"] + }, + + "sortBlock": { + "type": "object", + "properties": { + "key": { "type": "string" }, + "order": { "$ref": "#/definitions/sortOrder" } + }, + "required": ["key"], + "additionalProperties": false + }, + + "oneOfSort": { + "oneOf": [ {"type": "string" }, + {"$ref": "#/definitions/sortBlock" }] + }, + // Blocks "blockLink": { @@ -93,14 +157,44 @@ "additionalProperties": false }, - "blockOperations": { + "blockResults": { + "type": "object", + "properties": { + "list": { "type": "array" }, + 
"average": { "type": "string" }, + "count": { "type": "string" }, + "sum": { "type": "string" }, + "limit": { "$ref": "#/definitions/positiveInt" }, + "sort": { "$ref": "#/definitions/oneOfSort"}, + "blob": { "$ref": "#/definitions/blob" } + }, + "additionalProperties": false + }, + + "blockImageOperations": { + "type": "array", + "minItems": 1, + "items": { + "anyOf": [ + { "$ref": "#/definitions/operationThreshold" }, + { "$ref": "#/definitions/operationResize" }, + { "$ref": "#/definitions/operationCrop" }, + { "$ref": "#/definitions/operationFlip" }, + { "$ref": "#/definitions/operationRotate" } + ] + }, + "uniqueItems": false + }, + + "blockVideoOperations": { "type": "array", "minItems": 1, "items": { "anyOf": [ { "$ref": "#/definitions/operationThreshold" }, { "$ref": "#/definitions/operationResize" }, - { "$ref": "#/definitions/operationCrop" } + { "$ref": "#/definitions/operationCrop" }, + { "$ref": "#/definitions/operationInterval" } ] }, "uniqueItems": false @@ -108,12 +202,24 @@ // Operations + "operationInterval": { + "type": "object", + "properties": { + "type": { "enum": [ "interval" ] }, + "start": { "$ref": "#/definitions/nonNegativeInt" }, + "stop": { "$ref": "#/definitions/nonNegativeInt" }, + "step": { "$ref": "#/definitions/positiveInt" } + }, + "required": ["type", "start", "stop"], + "additionalProperties": false + }, + "operationThreshold": { "type": "object", "properties": { "type": { "enum": [ "threshold" ] }, "value": { "$ref": "#/definitions/nonNegativeInt" } - }, + }, "required": ["type", "value"], "additionalProperties": false }, @@ -124,7 +230,7 @@ "type": { "enum": [ "resize" ] }, "height": { "$ref": "#/definitions/positiveInt" }, "width": { "$ref": "#/definitions/positiveInt" } - }, + }, "required": ["type", "height", "width"], "additionalProperties": false }, @@ -137,11 +243,46 @@ "y": { "$ref": "#/definitions/nonNegativeInt" }, "height": { "$ref": "#/definitions/positiveInt" }, "width": { "$ref": "#/definitions/positiveInt" } - }, 
+ }, "required": ["type", "x", "y", "height", "width"], "additionalProperties": false }, + "operationFlip": { + "type": "object", + "properties": { + "type": { "enum": [ "flip" ] }, + "code": { "type": "integer" } + }, + "required": ["type", "code"], + "additionalProperties": false + }, + + "operationRotate": { + "type": "object", + "properties": { + "type": { "enum": [ "rotate" ] }, + "angle": { "type": "number" }, + "resize": { "type": "boolean" } + }, + "required": ["type", "angle", "resize"], + "additionalProperties": false + }, + + // Shapes + + "shapeRectangle": { + "type": "object", + "properties": { + "x": { "$ref": "#/definitions/nonNegativeInt" }, + "y": { "$ref": "#/definitions/nonNegativeInt" }, + "w": { "$ref": "#/definitions/positiveInt" }, + "h": { "$ref": "#/definitions/positiveInt" } + }, + "required": ["x", "y", "w", "h"], + "additionalProperties": false + }, + // Top-Level Commands (needed for the Schema) "AddEntityTop": { @@ -151,6 +292,13 @@ "additionalProperties": false }, + "UpdateEntityTop": { + "properties": { + "UpdateEntity" : { "type": "object", "$ref": "#/definitions/UpdateEntity" } + }, + "additionalProperties": false + }, + "FindEntityTop": { "properties": { "FindEntity" : { "type": "object", "$ref": "#/definitions/FindEntity" } @@ -158,9 +306,23 @@ "additionalProperties": false }, - "ConnectTop": { + "AddConnectionTop": { "properties": { - "Connect" : { "type": "object", "$ref": "#/definitions/Connect" } + "AddConnection" : { "type": "object", "$ref": "#/definitions/AddConnection" } + }, + "additionalProperties": false + }, + + "UpdateConnectionTop": { + "properties": { + "UpdateConnection" : { "type": "object", "$ref": "#/definitions/UpdateConnection" } + }, + "additionalProperties": false + }, + + "FindConnectionTop": { + "properties": { + "FindConnection" : { "type": "object", "$ref": "#/definitions/FindConnection" } }, "additionalProperties": false }, @@ -172,13 +334,92 @@ "additionalProperties": false }, - "FindImageTop": { + 
"UpdateImageTop": { + "properties": { + "UpdateImage" : { "type": "object", "$ref": "#/definitions/UpdateImage" } + }, + "additionalProperties": false + }, + + "FindImageTop": { "properties": { "FindImage" : { "type": "object", "$ref": "#/definitions/FindImage" } }, "additionalProperties": false }, + "AddDescriptorTop": { + "properties": { + "AddDescriptor" : { "type": "object", + "$ref": "#/definitions/AddDescriptor" } + }, + "additionalProperties": false + }, + + "AddDescriptorSetTop": { + "properties": { + "AddDescriptorSet" : { "type": "object", + "$ref": "#/definitions/AddDescriptorSet" } + }, + "additionalProperties": false + }, + + "ClassifyDescriptorTop": { + "properties": { + "ClassifyDescriptor" : { "type": "object", "$ref": "#/definitions/ClassifyDescriptor" } + }, + "additionalProperties": false + }, + + "FindDescriptorTop": { + "properties": { + "FindDescriptor" : { "type": "object", "$ref": "#/definitions/FindDescriptor" } + }, + "additionalProperties": false + }, + + "AddBoundingBoxTop": { + "properties": { + "AddBoundingBox" : { "type": "object", "$ref": "#/definitions/AddBoundingBox" } + }, + "additionalProperties": false + }, + + "UpdateBoundingBoxTop": { + "properties": { + "UpdateBoundingBox" : { "type": "object", "$ref": "#/definitions/UpdateBoundingBox" } + }, + "additionalProperties": false + }, + + "FindBoundingBoxTop": { + "properties": { + "FindBoundingBox" : { "type": "object", "$ref": "#/definitions/FindBoundingBox" } + }, + "additionalProperties": false + }, + + "AddVideoTop": { + "properties": { + "AddVideo" : { "type": "object", "$ref": "#/definitions/AddVideo" } + }, + "additionalProperties": false + }, + + "UpdateVideoTop": { + "properties": { + "UpdateVideo" : { "type": "object", "$ref": "#/definitions/UpdateVideo" } + }, + "additionalProperties": false + }, + + "FindVideoTop": { + "properties": { + "FindVideo" : { "type": "object", "$ref": "#/definitions/FindVideo" } + }, + "additionalProperties": false + }, + // Commands 
"AddEntity": { @@ -186,6 +427,7 @@ "class": { "type": "string" }, "_ref": { "$ref": "#/definitions/refInt" }, "link": { "$ref": "#/definitions/blockLink" }, + "blob": { "$ref": "#/definitions/blob" }, "properties": { "type": "object" }, "constraints": { "type": "object" } }, @@ -193,7 +435,18 @@ "additionalProperties": false }, - "Connect": { + "UpdateEntity": { + "properties": { + "class": { "type": "string" }, + "_ref": { "$ref": "#/definitions/refInt" }, + "properties": { "type": "object" }, + "remove_props": { "$ref": "#/definitions/stringArray" }, + "constraints": { "type": "object" } + }, + "additionalProperties": false + }, + + "AddConnection": { "properties": { "class": { "type": "string" }, "ref1": { "$ref": "#/definitions/refInt" }, @@ -204,44 +457,202 @@ "additionalProperties": false }, + "UpdateConnection": { + "properties": { + "class": { "type": "string" }, + "_ref": { "$ref": "#/definitions/refInt" }, + "ref1": { "$ref": "#/definitions/refInt" }, + "ref2": { "$ref": "#/definitions/refInt" }, + "properties": { "type": "object" }, + "remove_props": { "$ref": "#/definitions/stringArray" }, + "constraints": { "type": "object" } + }, + "additionalProperties": false + }, + "FindEntity": { "properties": { "class": { "type": "string" }, "_ref": { "$ref": "#/definitions/refInt" }, "link": { "$ref": "#/definitions/blockLink" }, "constraints": { "type": "object" }, - "results": { "type": "object" }, + "results": { "$ref": "#/definitions/blockResults" }, "unique": { "type": "boolean" } }, + "additionalProperties": false + }, - "required": ["class"], + "FindConnection": { + "properties": { + "class": { "type": "string" }, + "_ref": { "$ref": "#/definitions/refInt" }, + "ref1": { "$ref": "#/definitions/refInt" }, + "ref2": { "$ref": "#/definitions/refInt" }, + "constraints": { "type": "object" }, + "results": { "type": "object" }, + "unique": { "type": "boolean" } + }, "additionalProperties": false }, "AddImage": { "properties": { "_ref": { "$ref": 
"#/definitions/refInt" }, - "format": { "$ref": "#/definitions/formatString" }, + "format": { "$ref": "#/definitions/imgFormatString" }, "link": { "$ref": "#/definitions/blockLink" }, - "operations": { "$ref": "#/definitions/blockOperations" }, - "collections":{ "$ref": "#/definitions/stringArray" }, + "operations": { "$ref": "#/definitions/blockImageOperations" }, "properties": { "type": "object" } }, "additionalProperties": false }, + "UpdateImage": { + "properties": { + "_ref": { "$ref": "#/definitions/refInt" }, + "properties": { "type": "object" }, + "remove_props": { "$ref": "#/definitions/stringArray" }, + "unique": { "type": "boolean" }, + "constraints": { "type": "object" } + }, + "additionalProperties": false + }, + "FindImage": { "properties": { "_ref": { "$ref": "#/definitions/refInt" }, "link": { "$ref": "#/definitions/blockLink" }, - "operations": { "$ref": "#/definitions/blockOperations" }, - "format": { "$ref": "#/definitions/formatString" }, + "operations": { "$ref": "#/definitions/blockImageOperations" }, + "format": { "$ref": "#/definitions/imgFormatString" }, "constraints": { "type": "object" }, - "results": { "type": "object" }, + "results": { "$ref": "#/definitions/blockResults" }, + "unique": { "type": "boolean" } + }, + "additionalProperties": false + }, + + "AddDescriptorSet": { + "properties": { + "name": { "type": "string" }, + "dimensions": { "$ref": "#/definitions/refInt" }, + "link": { "$ref": "#/definitions/blockLink" }, + "properties": { "type": "object" } + }, + "required": ["name", "dimensions"], + "additionalProperties": false + }, + + "AddDescriptor": { + "properties": { + "set": { "type": "string" }, + "label": { "type": "string" }, + "_ref": { "$ref": "#/definitions/refInt" }, + "link": { "$ref": "#/definitions/blockLink" }, + "properties": { "type": "object" } + }, + "required": ["set"], + "additionalProperties": false + }, + + "ClassifyDescriptor": { + "properties": { + "set": { "type": "string" }, + "_ref": { "$ref": 
"#/definitions/refInt" }, + "k_neighbors": { "$ref": "#/definitions/positiveInt" } + }, + "required": ["set"], + "additionalProperties": false + }, + + "FindDescriptor": { + "properties": { + "set": { "type": "string" }, + "_ref": { "$ref": "#/definitions/refInt" }, + "k_neighbors": { "$ref": "#/definitions/positiveInt" }, + "results": { "$ref": "#/definitions/blockResults" }, + "constraints": { "type": "object" }, + "properties": { "type": "object" } + }, + "required": ["set"], + "additionalProperties": false + }, + + "AddBoundingBox": { + "properties": { + "_ref": { "$ref": "#/definitions/refInt" }, + "rectangle": { "$ref": "#/definitions/shapeRectangle" }, + "image": { "$ref": "#/definitions/refInt" }, + "link": { "$ref": "#/definitions/blockLink" }, + "properties": { "type": "object" } + }, + "required": ["rectangle"], + "additionalProperties": false + }, + + "UpdateBoundingBox": { + "properties": { + "_ref": { "$ref": "#/definitions/refInt" }, + "rectangle": { "$ref": "#/definitions/shapeRectangle" }, + "properties": { "type": "object" }, + "remove_props": { "$ref": "#/definitions/stringArray" }, + "unique": { "type": "boolean" }, + "constraints": { "type": "object" } + }, + "additionalProperties": false + }, + + "FindBoundingBox": { + "properties": { + "_ref": { "$ref": "#/definitions/refInt" }, + "rectangle": { "$ref": "#/definitions/shapeRectangle" }, + "image": { "$ref": "#/definitions/refInt" }, + "link": { "$ref": "#/definitions/blockLink" }, + "constraints": { "type": "object" }, + "format": { "$ref": "#/definitions/imgFormatString" }, + "results": { "$ref": "#/definitions/blockResults" }, + "unique": { "type": "boolean" } + }, + "not": { + "anyOf": [ {"required": ["image", "link"] } ] + }, + "additionalProperties": false + }, + + "AddVideo": { + "properties": { + "_ref": { "$ref": "#/definitions/refInt" }, + "codec": { "$ref": "#/definitions/vidCodecString" }, + "container": { "$ref": "#/definitions/vidContainerString" }, + "link": { "$ref": 
"#/definitions/blockLink" }, + "operations": { "$ref": "#/definitions/blockVideoOperations" }, + "properties": { "type": "object" } + }, + "additionalProperties": false + }, + + "UpdateVideo": { + "properties": { + "_ref": { "$ref": "#/definitions/refInt" }, + "properties": { "type": "object" }, + "remove_props": { "$ref": "#/definitions/stringArray" }, + "constraints": { "type": "object" } + }, + "additionalProperties": false + }, + + "FindVideo": { + "properties": { + "_ref": { "$ref": "#/definitions/refInt" }, + "link": { "$ref": "#/definitions/blockLink" }, + "operations": { "$ref": "#/definitions/blockVideoOperations" }, + "codec": { "$ref": "#/definitions/vidCodecString" }, + "container": { "$ref": "#/definitions/vidContainerString" }, + "constraints": { "type": "object" }, + "results": { "$ref": "#/definitions/blockResults" }, "unique": { "type": "boolean" } }, "additionalProperties": false } + } } diff --git a/utils/src/protobuf/pmgdMessages.proto b/utils/src/protobuf/pmgdMessages.proto index 4f001e5a..a683ea48 100644 --- a/utils/src/protobuf/pmgdMessages.proto +++ b/utils/src/protobuf/pmgdMessages.proto @@ -88,6 +88,36 @@ message AddEdge Edge edge = 2; } +message UpdateNode +{ + // Specify an identifier previously found or -1 for no caching + int64 identifier = 1; + + // Give a query constraint if id < 0 or no QueryNode before. + QueryNode query_node = 2; + + // Specify properties to update. + repeated Property properties = 3; + + // Specify properties to remove + repeated string remove_props = 4; +} + +message UpdateEdge +{ + // Specify an identifier previously found or -1 for no caching + int64 identifier = 1; + + // Give a query constraint if id < 0 or no QueryEdge before. + QueryEdge query_edge = 2; + + // Specify properties to update. + repeated Property properties = 3; + + // Specify properties to remove + repeated string remove_props = 4; +} + message PropertyPredicate { // These op values need to keep matching the PMGD counterparts. 
@@ -179,18 +209,9 @@ message LinkInfo bool nb_unique = 14; } -message QueryNode +// Define a standard constraint block that can be used by QueryNode and Edge +message Constraints { - // _ref for this search. - // TODO Support caching for the neighbor version of this call too - int64 identifier = 1; - - // For the version of FindEntity that has link in it, this - // member will be defined. Rest of the tag and property - // constraints will then apply to the neighbors, not the start - // node, except the unique constraint. - LinkInfo link = 2; - // In case of QueryNeighbor, this is the tag constrait for the // found neighbors. Similarly for the remaining variables here. oneof tag_oneof { @@ -204,6 +225,15 @@ message QueryNode PredicateOp p_op = 7; repeated PropertyPredicate predicates = 8; + // Indicate whether we should make sure there was only one value + // that matched the constraints. + // TODO Support this for QueryNeighbor + bool unique = 12; +} + +// Define a results block also to be shared across. +message ResultInfo +{ // Specify what to do with the responses. If the option is // list, the response properties will indicate list of what // properties. @@ -215,11 +245,6 @@ message QueryNode // repeated properties repeated string response_keys = 11; - // Indicate whether we should make sure there was only one value - // that matched the constraints. - // TODO Support this for QueryNeighbor - bool unique = 12; - // Should the results be sorted? Default is false. // If sort is true, it will sort in all cases. Whether it is to // count or to calculate an average. @@ -227,10 +252,55 @@ message QueryNode // Choose the property key to sort on. string sort_key = 14; + // Choose sorting order (default is false, i.e. ascending) + bool descending = 15; + // Limit the number of results returned or used for calculations uint64 limit = 18; } +message QueryNode +{ + // _ref for this search. 
+ // TODO Support caching for the neighbor version of this call too + int64 identifier = 1; + + // For the version of FindEntity that has link in it, this + // member will be defined. Rest of the tag and property + // constraints will then apply to the neighbors, not the start + // node, except the unique constraint. + LinkInfo link = 2; + + // Specify query constraints + Constraints constraints = 3; + + // Specify how we want results + ResultInfo results = 4; +} + +message QueryEdge +{ + // _ref for this search. + // TODO Support caching for the neighbor version of this call too + int64 identifier = 1; + + // Specify query constraints + Constraints constraints = 3; + + // Specify how we want results + ResultInfo results = 4; + + // If you want to match source and/or destination nodes, + // we need ref/id value to find nodes for those. + // The src_node_id is where we will start traversing the + // edges from and determine if they meet the search criteria + // given above. The destination nodes will then be searched + // for the matching edges out of the ones cached for the + // given destination node reference. + int64 src_node_id = 5; + int64 dest_node_id = 6; +} + message Command { enum CommandId { @@ -240,8 +310,11 @@ message Command AddNode = 0x21; AddEdge = 0x22; + UpdateNode = 0x23; + UpdateEdge = 0x24; QueryNode = 0x30; + QueryEdge = 0x31; } // TODO Might need a transaction object here or maybe just another variable @@ -260,7 +333,10 @@ message Command //we define all cmds type here. AddNode add_node = 10; AddEdge add_edge = 11; + UpdateNode update_node = 12; + UpdateEdge update_edge = 13; QueryNode query_node = 15; + QueryEdge query_edge = 16; } message PropertyList @@ -313,4 +389,8 @@ message CommandResponse // For the query, multiple nodes could have met the condition // and therefore there can be a list per property key. 
map prop_values = 7; + + // Indicate if the response is for a node or edge so we can populate + // JSON correctly, especially for queries. + bool node_edge = 8; }