diff --git a/.clang-format b/.clang-format new file mode 100644 index 000000000000..3ddd8b43f623 --- /dev/null +++ b/.clang-format @@ -0,0 +1,90 @@ +--- +Language: Cpp +# BasedOnStyle: Google +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: true +AlignEscapedNewlinesLeft: true +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: All +AllowShortIfStatementsOnASingleLine: true +AllowShortLoopsOnASingleLine: true +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: true +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +IncludeCategories: + - Regex: '^<.*\.h>' + Priority: 1 + - Regex: '^<.*' + Priority: 2 + - Regex: '.*' + Priority: 3 +IndentCaseLabels: true +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: false +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Right +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Auto +TabWidth: 8 +UseTab: Never +... + diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000000..5aae9fa8f71f --- /dev/null +++ b/.gitignore @@ -0,0 +1,13 @@ +build/ +.vscode/ +.idea/ +cmake-build-debug/ +cscope.out +.DS_Store +debug/ +release/ +target/ +debs/ +rpms/ +*.pyc +*.tmp diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 000000000000..a7479bc8c80e --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,65 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) +PROJECT(TDengine) + +SET(CMAKE_C_STANDARD 11) +SET(CMAKE_VERBOSE_MAKEFILE ON) + +# +## generate debug version: +## mkdir debug; cd debug; cmake -DCMAKE_BUILD_TYPE=Debug .. +## generate release version: +## mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release .. 
+#
+
+#set macro definitions
+ADD_DEFINITIONS(-D_REENTRANT -D_M_X64 -D__USE_POSIX -D_LIBC_REENTRANT)
+
+#set macro definitions according to os platform
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR UNIX)
+    ADD_DEFINITIONS(-DLINUX -D_LINUX)
+ENDIF ()
+
+#set debug & release related options
+SET(COMMON_FLAGS "-Wall -fPIC -malign-double -Wno-char-subscripts -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
+SET(DEBUG_FLAGS "-O0 -DDEBUG")
+
+#compiler debug options
+SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}")
+SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
+
+SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS}")
+SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS}")
+
+MESSAGE(STATUS "Project source directory: " ${PROJECT_SOURCE_DIR})
+
+IF (${CMAKE_BUILD_TYPE} MATCHES "Release")
+    MESSAGE(STATUS "Build Release Version")
+ELSE ()
+    SET(CMAKE_BUILD_TYPE "Debug")
+    MESSAGE(STATUS "Build Debug Version")
+ENDIF ()
+
+#set output directory
+SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
+SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)
+SET(TESTS_OUTPUT_DIR ${PROJECT_BINARY_DIR}/build/test)
+
+MESSAGE(STATUS "Project binary output path: " ${PROJECT_BINARY_DIR})
+MESSAGE(STATUS "Executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
+MESSAGE(STATUS "Project library output path: " ${LIBRARY_OUTPUT_PATH})
+
+# check for x64 platform
+IF (${CMAKE_SIZEOF_VOID_P} MATCHES 8)
+    MESSAGE(STATUS "Check target platform x64, pass")
+    ADD_SUBDIRECTORY(deps)
+    ADD_SUBDIRECTORY(src)
+    ADD_SUBDIRECTORY(tests)
+ELSE ()
+    MESSAGE(FATAL_ERROR "Check target platform x86, not supported yet")
+ENDIF ()
+
+SET(MAKE_INSTALL_SH "${PROJECT_SOURCE_DIR}/packaging/tools/make_install.sh")
+INSTALL(CODE "MESSAGE(\"make install script: ${MAKE_INSTALL_SH}\")")
+INSTALL(CODE "execute_process(COMMAND chmod 777 ${MAKE_INSTALL_SH})")
+INSTALL(CODE "execute_process(COMMAND ${MAKE_INSTALL_SH} ${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR})")
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000000..76cb0e0f31a6
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,78 @@
+# Contributor Covenant Code of Conduct
+[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg)](code_of_conduct.md)
+
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies within all project spaces, and it also applies when
+an individual is representing the project or its community in public spaces.
+Examples of representing a project or community include using an official
+project e-mail address, posting via an official social media account, or acting
+as an appointed representative at an online or offline event. Representation of
+a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at support@taosdata.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000000..91b0b20ff676
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing
+
+We appreciate contributions from all developers. Feel free to follow us, fork the repository, report bugs, and even submit your code on GitHub. However, we would like developers to follow our contribution guidelines for better cooperation.
+
+## Report bugs
+
+Any user can report bugs to us through the [GitHub issue tracker](https://github.com/taosdata/TDengine/issues). We appreciate a detailed description of the problem you encountered.
It is best to provide detailed steps for reproducing the bug; otherwise, an appendix with the log files generated when the bug occurred is welcome.
+
+## Sign the contributor license agreement
+
+You are required to sign the Contributor License Agreement (CLA) before submitting your code patch. Follow the [TaosData CLA](https://www.taosdata.com/en/contributor/) link to access the agreement and instructions on how to sign it.
+
+## Submit your code
+
+Before submitting your code, make sure you have [signed the contributor license agreement](#sign-the-contributor-license-agreement). Your submission should solve an issue or add a feature registered in the [GitHub issue tracker](https://github.com/taosdata/TDengine/issues). If no corresponding issue or feature is found in the issue tracker, please create one. When submitting your code to our repository, please create a pull request with the issue number included.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000000..0ad25db4bd1d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+ + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
new file mode 100644
index 000000000000..d49ba169d6dc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,99 @@
+[![TDengine](TDenginelogo.png)](https://www.taosdata.com/en)
+
+# What is TDengine?
+
+TDengine is an open-source big data platform under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html), designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT infrastructure and Application Monitoring.
Besides being a 10x faster time-series database, it also provides caching, stream computing, message queuing, and other functionalities to reduce the complexity and cost of development and operation.
+
+- **10X Faster on Insert/Query Speeds**: Through its innovative storage design, a single core can process over 20K requests, ingest millions of data points, and retrieve over 10 million data points in a second. It is 10 times faster than other databases.
+
+- **1/5 Hardware/Cloud Service Costs**: Compared with typical big data solutions, less than 1/5 of computing resources are required. Via column-based storage and tuned compression algorithms for different data types, less than 1/10 of storage space is needed.
+
+- **Full Stack for Time-Series Data**: By integrating a database with message queuing, caching, and stream computing features, it is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software. This makes the system architecture simpler and more robust.
+
+- **Powerful Data Analysis**: Whether it is 10 years or one minute ago, data can be queried just by specifying the time range. Data can be aggregated over time, over multiple time streams, or both. Ad-hoc queries or analyses can be executed via the TDengine shell, Python, R, or Matlab.
+
+- **Seamless Integration with Other Tools**: Telegraf, Grafana, Matlab, R, and other tools can be integrated with TDengine without a line of code. MQTT, OPC, Hadoop, Spark, and many others will be integrated soon.
+
+- **Zero Management, No Learning Curve**: It takes only seconds to download, install, and run it successfully; there are no dependencies. Tables and databases are partitioned automatically. Standard SQL is used, with C/C++, Python, JDBC, Go and RESTful connectors.
+
+# Documentation
+Refer to the [TDengine Documentation](https://www.taosdata.com/en/documentation/) for details.
+
+# Building
+Build TDengine with [CMake](https://cmake.org/) in the project directory:
+
+```cmd
+mkdir build && cd build
+cmake .. && cmake --build .
+```
+
+# Running
+
+To start the TDengine server, run the command below in a terminal:
+```cmd
+./build/bin/taosd -c test/cfg
+```
+In another terminal, use the TDengine shell to connect to the server:
+```cmd
+./build/bin/taos -c test/cfg
+```
+
+# Installing
+After building successfully, TDengine can be installed by:
+```cmd
+make install
+```
+Users can find more information about the directories installed on the system in the [file directory] section. Note that installing from source code does not configure service management for TDengine.
+Users can also choose to [install from packages] instead.
+
+Start the service from the terminal:
+```cmd
+taosd
+```
+
+Then users can use the [TDengine shell](https://www.taosdata.com/en/getting-started/#TDengine-Shell) to connect to the TDengine server.
+```cmd
+taos
+```
+
+If the shell connects to the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
+
+# Try TDengine
+It is easy to run SQL commands from the shell, just as with other SQL databases.
+```sql
+create database db;
+use db;
+create table t (ts timestamp, a int);
+insert into t values ('2019-07-15 00:00:00', 1);
+insert into t values ('2019-07-15 01:00:00', 2);
+select * from t;
+drop database db;
+```
+
+# Developing with TDengine
+TDengine provides abundant development tools for building applications on top of it. Follow the links below to find support for your programming language.
+ +- [Java](https://www.taosdata.com/en/documentation/connector/#Java-Connector) +- [C/C++](https://www.taosdata.com/en/documentation/connector/#C/C++-Connector) +- [Python](https://www.taosdata.com/en/documentation/connector/#Python-Connector) +- [Go](https://www.taosdata.com/en/documentation/connector/#Go-Connector) +- [RESTful API](https://www.taosdata.com/en/documentation/connector/#RESTful-Connector) + +# Contribute TDengine + +Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project. diff --git a/TDenginelogo.png b/TDenginelogo.png new file mode 100644 index 000000000000..19a92592d7e8 Binary files /dev/null and b/TDenginelogo.png differ diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt new file mode 100644 index 000000000000..235b444a98c0 --- /dev/null +++ b/deps/CMakeLists.txt @@ -0,0 +1,6 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +ADD_SUBDIRECTORY(zlib-1.2.11) + diff --git a/deps/inc/zconf.h b/deps/inc/zconf.h new file mode 100644 index 000000000000..77398c11a1e2 --- /dev/null +++ b/deps/inc/zconf.h @@ -0,0 +1,534 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". + */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols and init macros */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# define adler32_z z_adler32_z +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define crc32_z z_crc32_z +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateGetDictionary z_deflateGetDictionary +# define deflateInit z_deflateInit +# define deflateInit2 z_deflateInit2 +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# 
define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzfread z_gzfread +# define gzfwrite z_gzfwrite +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzvprintf z_gzvprintf +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit z_inflateBackInit +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCodesUsed z_inflateCodesUsed +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetDictionary z_inflateGetDictionary +# define inflateGetHeader z_inflateGetHeader +# define inflateInit z_inflateInit +# define inflateInit2 z_inflateInit2 +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateResetKeep z_inflateResetKeep +# define inflateSetDictionary z_inflateSetDictionary +# define inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define inflateUndermine z_inflateUndermine +# define inflateValidate z_inflateValidate +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# define uncompress2 z_uncompress2 +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 
64k bytes at a time (needed on systems with 16-bit int). + */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +#ifdef Z_SOLO + typedef unsigned long z_size_t; +#else +# define z_longlong long long +# if defined(NO_SIZE_T) + typedef unsigned NO_SIZE_T z_size_t; +# elif defined(STDC) +# include + typedef size_t z_size_t; +# else + typedef unsigned long z_size_t; +# endif +# undef z_longlong +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus about 7 kilobytes + for small objects. +*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. 
+ */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include <windows.h> + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. */ +# define ZEXPORT WINAPI +# ifdef WIN32 +# define ZEXPORTVA WINAPIV +# else +# define ZEXPORTVA FAR CDECL +# endif +# endif +#endif + +#if defined (__BEOS__) +# ifdef ZLIB_DLL +# ifdef ZLIB_INTERNAL +# define ZEXPORT __declspec(dllexport) +# define ZEXPORTVA __declspec(dllexport) +# else +# define ZEXPORT __declspec(dllimport) +# define ZEXPORTVA __declspec(dllimport) +# endif +# endif +#endif + +#ifndef ZEXTERN +# define ZEXTERN extern +#endif +#ifndef ZEXPORT +# define ZEXPORT +#endif +#ifndef ZEXPORTVA +# define ZEXPORTVA +#endif + +#ifndef FAR +# define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM + /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void const *voidpc; + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte const *voidpc; + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include <limits.h> +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short +# endif +#endif + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#if 1 /* was set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#if 1 /* was set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include <sys/types.h> /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include <stdarg.h> /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO +# include <stddef.h> /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no
64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include <unixio.h> /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +# define SEEK_SET 0 /* Seek from beginning of file. */ +# define SEEK_CUR 1 /* Seek from current position. */ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff --git a/deps/inc/zlib.h b/deps/inc/zlib.h new file mode 100644 index 000000000000..f09cdaf1e054 --- /dev/null +++ b/deps/inc/zlib.h @@ -0,0 +1,1912 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format).
+*/ + +#ifndef ZLIB_H +#define ZLIB_H + +#include "zconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZLIB_VERSION "1.2.11" +#define ZLIB_VERNUM 0x12b0 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 2 +#define ZLIB_VER_REVISION 11 +#define ZLIB_VER_SUBREVISION 0 + +/* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. + + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output + (providing more output space) before each call. + + The compressed data format used by default by the in-memory functions is + the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped + around a deflate stream, which is itself documented in RFC 1951. + + The library also supports reading and writing files in gzip (.gz) format + with an interface similar to that of stdio using the functions that start + with "gz". The gzip format is different from the zlib format. gzip is a + gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. + + This library can optionally read and write gzip and raw deflate streams in + memory as well. + + The zlib format was designed to be compact and fast for use in memory + and on communications channels. The gzip format was designed for single- + file compression on file systems, has a larger header than zlib to maintain + directory information, and uses a different, slower check method than zlib. + + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in the case of corrupted input. +*/ + +typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); +typedef void (*free_func) OF((voidpf opaque, voidpf address)); + +struct internal_state; + +typedef struct z_stream_s { + z_const Bytef *next_in; /* next input byte */ + uInt avail_in; /* number of bytes available at next_in */ + uLong total_in; /* total number of input bytes read so far */ + + Bytef *next_out; /* next output byte will go here */ + uInt avail_out; /* remaining free space at next_out */ + uLong total_out; /* total number of bytes output so far */ + + z_const char *msg; /* last error message, NULL if no error */ + struct internal_state FAR *state; /* not visible by applications */ + + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree; /* used to free the internal state */ + voidpf opaque; /* private data object passed to zalloc and zfree */ + + int data_type; /* best guess about the data type: binary or text + for deflate, or the decoding state for inflate */ + uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */ + uLong reserved; /* reserved for future use */ +} z_stream; + +typedef z_stream FAR *z_streamp; + +/* + gzip header information passed to and from zlib routines. See RFC 1952 + for more details on the meanings of these fields. 
+*/ +typedef struct gz_header_s { + int text; /* true if compressed data believed to be text */ + uLong time; /* modification time */ + int xflags; /* extra flags (not used when writing a gzip file) */ + int os; /* operating system */ + Bytef *extra; /* pointer to extra field or Z_NULL if none */ + uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ + uInt extra_max; /* space at extra (only when reading header) */ + Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ + uInt name_max; /* space at name (only when reading header) */ + Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ + uInt comm_max; /* space at comment (only when reading header) */ + int hcrc; /* true if there was or will be a header crc */ + int done; /* true when done reading gzip header (not used + when writing a gzip file) */ +} gz_header; + +typedef gz_header FAR *gz_headerp; + +/* + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. + + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return Z_NULL if there is not enough memory for the object. + If zlib is used in a multi-threaded application, zalloc and zfree must be + thread safe. In that case, zlib is thread-safe. When zalloc and zfree are + Z_NULL on entry to the initialization function, they are set to internal + routines that use the standard library functions malloc() and free(). + + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use by the decompressor (particularly + if the decompressor wants to decompress everything in a single step). +*/ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 +#define Z_SYNC_FLUSH 2 +#define Z_FULL_FLUSH 3 +#define Z_FINISH 4 +#define Z_BLOCK 5 +#define Z_TREES 6 +/* Allowed flush values; see deflate() and inflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. 
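The notes above require zalloc, zfree and opaque to be set before the first init call; leaving all three Z_NULL selects zlib's built-in malloc()/free() based allocator. A minimal sketch of that setup (illustrative only, not part of the vendored header; the helper name start_deflate is made up):

    #include <string.h>
    #include "zlib.h"

    /* Zero the stream, request the default allocators, then open it for
     * compression.  Returns whatever deflateInit() returns (Z_OK on success). */
    static int start_deflate(z_stream *strm)
    {
        memset(strm, 0, sizeof(*strm));
        strm->zalloc = Z_NULL;   /* use the built-in malloc()-based allocator */
        strm->zfree  = Z_NULL;
        strm->opaque = Z_NULL;
        return deflateInit(strm, Z_DEFAULT_COMPRESSION);
    }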
+ */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) +/* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_RLE 3 +#define Z_FIXED 4 +#define Z_DEFAULT_STRATEGY 0 +/* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_TEXT 1 +#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ +#define Z_UNKNOWN 2 +/* Possible values of the data_type field for deflate() */ + +#define Z_DEFLATED 8 +/* The deflate compression method (the only one supported in this version) */ + +#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ + +#define zlib_version zlibVersion() +/* for compatibility with versions < 1.0.2 */ + + + /* basic functions */ + +ZEXTERN const char * ZEXPORT zlibVersion OF((void)); +/* The application can compare zlibVersion and ZLIB_VERSION for consistency. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. + */ + +/* +ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). + + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). +*/ + + +ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); +/* + deflate compresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. deflate performs one or both of the + following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary. Some output may be provided even if + flush is zero. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. 
The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. See deflatePending(), + which can be used if desired to determine whether or not there is more ouput + in that case. + + Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to + decide how much data to accumulate before producing output, in order to + maximize compression. + + If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + flushed to the output buffer and the output is aligned on a byte boundary, so + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. + This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed + codes block. + + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. It may need to wait for the next + block to be emitted. This is for advanced applications that need to control + the emission of deflate blocks. + + If flush is set to Z_FULL_FLUSH, all output is flushed as with + Z_SYNC_FLUSH, and the compression state is reset so that decompression can + restart from this point if previous compressed data has been damaged or if + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + compression. + + If deflate returns with avail_out == 0, this function must be called again + with the same value of the flush parameter and more output space (updated + avail_out), until the flush is complete (deflate returns with non-zero + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out is greater than six to avoid repeated flush markers due to + avail_out == 0 on return. + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this + function must be called again with Z_FINISH and more output space (updated + avail_out) but no more input data, until it returns with Z_STREAM_END or an + error. After deflate has returned Z_STREAM_END, the only possible operations + on the stream are deflateReset or deflateEnd. 
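A rough sketch of the loop described above, assuming the stream was already opened with deflateInit(), all input has been placed in next_in/avail_in, and output goes to a stdio file; deflate_to_file and CHUNK are illustrative names, not zlib API:

    #include <stdio.h>
    #include "zlib.h"

    #define CHUNK 16384

    /* Keep calling deflate(strm, Z_FINISH) with fresh output space whenever it
     * fills the buffer, until it reports Z_STREAM_END, per the rules above. */
    static int deflate_to_file(z_stream *strm, FILE *dest)
    {
        unsigned char out[CHUNK];
        unsigned      have;
        int           ret;

        do {
            strm->next_out  = out;
            strm->avail_out = CHUNK;
            ret = deflate(strm, Z_FINISH);      /* Z_OK, Z_BUF_ERROR or Z_STREAM_END */
            if (ret == Z_STREAM_ERROR)
                return ret;                     /* stream state was clobbered */
            have = CHUNK - strm->avail_out;
            if (fwrite(out, 1, have, dest) != have)
                return Z_ERRNO;
        } while (ret != Z_STREAM_END);
        return Z_OK;
    }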
+ + Z_FINISH can be used in the first deflate call after deflateInit if all the + compression is to be done in a single step. In order to complete in one + call, avail_out must be at least the value returned by deflateBound (see + below). Then deflate is guaranteed to return Z_STREAM_END. If not enough + output space is provided, deflate will not return Z_STREAM_END, and it must + be called again as described above. + + deflate() sets strm->adler to the Adler-32 checksum of all input read + so far (that is, total_in bytes). If a gzip stream is being generated, then + strm->adler will be the CRC-32 checksum of the input read so far. (See + deflateInit2 below.) + + deflate() may update strm->data_type if it can make a good guess about + the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is + considered binary. This field is only for information purposes and does not + affect the compression algorithm in any manner. + + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was Z_NULL or the state was inadvertently written over + by the application), or Z_BUF_ERROR if no progress is possible (for example + avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and + deflate() can be called again with more input and more output space to + continue compressing. +*/ + + +ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be + deallocated). +*/ + + +/* +ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); + + Initializes the internal stream state for decompression. The fields + next_in, avail_in, zalloc, zfree and opaque must be initialized before by + the caller. In the current version of inflate, the provided input is not + read or consumed. The allocation of a sliding window will be deferred to + the first call of inflate (if the decompression does not complete on the + first call). If zalloc and zfree are set to Z_NULL, inflateInit updates + them to use default allocation functions. + + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression. + Actual decompression will be done by inflate(). So next_in, and avail_in, + next_out, and avail_out are unused and unchanged. The current + implementation of inflateInit() does not process any header information -- + that is deferred until inflate() is called. +*/ + + +ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); +/* + inflate decompresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. 
It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. inflate performs one or both of the + following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), then next_in and avail_in are updated + accordingly, and processing will resume at this point for the next call of + inflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). + + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. If the + caller of inflate() does not provide both available input and available + output space, it is possible that there will be no progress made. The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. + + The Z_BLOCK option assists in appending to or combining deflate streams. + To assist in this, on return inflate() always sets strm->data_type to the + number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. + + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. 
+ + inflate() should normally be called until it returns Z_STREAM_END or an + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. + + In this implementation, inflate() always flushes as much output as + possible to the output buffer, and always uses the faster approach on the + first call. So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. + + If a preset dictionary is needed after this call (see inflateSetDictionary + below), inflate sets strm->adler to the Adler-32 checksum of the dictionary + chosen by the compressor and returns Z_NEED_DICT; otherwise it sets + strm->adler to the Adler-32 checksum of all output produced so far (that is, + total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described + below. At the end of the stream, inflate() checks that its computed Adler-32 + checksum is equal to that saved by the compressor and returns Z_STREAM_END + only if the checksum is correct. + + inflate() can decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically, if requested when + initializing with inflateInit2(). Any information contained in the gzip + header is not retained unless inflateGetHeader() is used. When processing + gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output + produced so far. The CRC-32 is checked against the gzip trailer, as is the + uncompressed length, modulo 2^32. + + inflate() returns Z_OK if some progress has been made (more input processed + or more output produced), Z_STREAM_END if the end of the compressed data has + been reached and all uncompressed output has been produced, Z_NEED_DICT if a + preset dictionary is needed at this point, Z_DATA_ERROR if the input data was + corrupted (input stream not conforming to the zlib format or incorrect check + value, in which case strm->msg points to a string with a more specific + error), Z_STREAM_ERROR if the stream structure was inconsistent (for example + next_in or next_out was Z_NULL, or the state was inadvertently written over + by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR + if no progress was possible or if there was not enough room in the output + buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + inflate() can be called again with more input and more output space to + continue decompressing. 
If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is to be attempted. +*/ + + +ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state + was inconsistent. +*/ + + + /* Advanced functions */ + +/* + The following functions are needed only in some special applications. +*/ + +/* +ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy)); + + This is another version of deflateInit with more compression options. The + fields next_in, zalloc, zfree and opaque must be initialized before by the + caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. + + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if + deflateInit is used instead. + + For the current implementation of deflate(), a windowBits value of 8 (a + window size of 256 bytes) is not supported. As a result, a request for 8 + will result in 9 (a 512-byte window). In that case, providing 8 to + inflateInit2() will result in an error when the zlib header with 9 is + checked against the initialization of inflate(). The remedy is to not use 8 + with deflateInit2() with this initialization, or at least in that case use 9 + with inflateInit2(). + + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data + with no zlib header or trailer, and will not compute a check value. + + windowBits can also be greater than 15 for optional gzip encoding. Add + 16 to windowBits to write a simple gzip header and trailer around the + compressed data instead of a zlib wrapper. The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to the appropriate value, + if the operating system was determined at compile time. If a gzip stream is + being written, strm->adler is a CRC-32 instead of an Adler-32. + + For raw deflate or gzip encoding, a request for a 256-byte window is + rejected as invalid, since only the zlib header provides a means of + transmitting the window size to the decompressor. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. + + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match), or Z_RLE to limit match distances to one (run-length + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. 
In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman + coding and less string matching; it is somewhat intermediate between + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. deflateInit2 does not perform any + compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the compression dictionary from the given byte sequence + without producing any compressed output. When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). + + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary. Using a + dictionary is most useful when the data to be compressed is short and can be + predicted with good accuracy; the data can then be compressed better than + with the default empty dictionary. + + Depending on the size of the compression data structures selected by + deflateInit or deflateInit2, a part of the dictionary may in effect be + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. + + Upon return of this function, strm->adler is set to the Adler-32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The Adler-32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) If a raw deflate was requested, then the + Adler-32 value is not computed and strm->adler is not set. + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent (for example if deflate has already been called for this stream + or if not at a block boundary for raw deflate). 
deflateSetDictionary does + not perform any compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm, + Bytef *dictionary, + uInt *dictLength)); +/* + Returns the sliding dictionary being maintained by deflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If deflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similary, if dictLength is Z_NULL, then it is not set. + + deflateGetDictionary() may return a length less than the window size, even + when more than the window size in input has been provided. It may return up + to 258 bytes less in that case, due to how zlib's implementation of deflate + manages the sliding window and lookahead for matches, where matches can be + up to 258 bytes long. If the application needs the last window-size bytes of + input, then that would need to be saved by the application outside of zlib. + + deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + +ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. + + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); +/* + This function is equivalent to deflateEnd followed by deflateInit, but + does not free and reallocate the internal compression state. The stream + will leave the compression level and any other attributes that may have been + set unchanged. + + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, + int level, + int strategy)); +/* + Dynamically update the compression level and compression strategy. The + interpretation of level and strategy is as in deflateInit2(). This can be + used to switch between compression and straight copy of the input data, or + to switch to a different kind of input data requiring a different strategy. + If the compression approach (which is a function of the level) or the + strategy is changed, and if any input has been consumed in a previous + deflate() call, then the input available so far is compressed with the old + level and strategy using deflate(strm, Z_BLOCK). There are three approaches + for the compression levels 0, 1..3, and 4..9 respectively. The new level + and strategy will take effect at the next call of deflate(). + + If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does + not have enough output space to complete, then the parameter change will not + take effect. 
In this case, deflateParams() can be called again with the + same parameters and more output space to try again. + + In order to assure a change in the parameters on the first try, the + deflate stream should be flushed using deflate() with Z_BLOCK or other flush + request until strm.avail_out is not zero, before calling deflateParams(). + Then no more input data should be provided before the deflateParams() call. + If this is done, the old level and strategy will be applied to the data + compressed before deflateParams(), and the new level and strategy will be + applied to the the data compressed after deflateParams(). + + deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream + state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if + there was not enough output space to complete the compression of the + available input data before a change in the strategy or approach. Note that + in the case of a Z_BUF_ERROR, the parameters are not changed. A return + value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be + retried with more output space. +*/ + +ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, + int good_length, + int max_lazy, + int nice_length, + int max_chain)); +/* + Fine tune deflate's internal compression parameters. This should only be + used by someone who understands the algorithm used by zlib's deflate for + searching for the best matching string, and even then only by the most + fanatic optimizer trying to squeeze out the last compressed bit for their + specific input data. Read the deflate.c source code for the meaning of the + max_lazy, good_length, nice_length, and max_chain parameters. + + deflateTune() can be called after deflateInit() or deflateInit2(), and + returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. + */ + +ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, + uLong sourceLen)); +/* + deflateBound() returns an upper bound on the compressed size after + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. +*/ + +ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, + unsigned *pending, + int *bits)); +/* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. + + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + +ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + deflatePrime() inserts bits in the deflate output stream. 
The intent + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. + + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. +*/ + +ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, + gz_headerp head)); +/* + deflateSetHeader() provides gzip header information for when a gzip + stream is requested by deflateInit2(). deflateSetHeader() may be called + after deflateInit2() or deflateReset() and before the first call of + deflate(). The text, time, os, extra field, name, and comment information + in the provided gz_header structure are written to the gzip header (xflag is + ignored -- the extra flags are set according to the compression level). The + caller must assure that, if not Z_NULL, name and comment are terminated with + a zero byte, and that if extra is not Z_NULL, that extra_len bytes are + available there. If hcrc is true, a gzip header crc is included. Note that + the current versions of the command-line version of gzip (up through version + 1.3.x) do not support header crc's, and will report that it is a "multi-part + gzip file" and give up. + + If deflateSetHeader is not used, the default gzip header has text false, + the time set to zero, and os set to 255, with no extra, name, or comment + fields. The gzip header is returned to the default state by deflateReset(). + + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, + int windowBits)); + + This is another version of inflateInit with an extra parameter. The + fields next_in, avail_in, zalloc, zfree and opaque must be initialized + before by the caller. + + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). It should be in the range 8..15 for + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value + provided to deflateInit2() while compressing, or it must be equal to 15 if + deflateInit2() was not used. If a compressed stream with a larger window + size is given as input, inflate() will return with the error code + Z_DATA_ERROR instead of trying to allocate a larger window. + + windowBits can also be zero to request that inflate use the window size in + the zlib header of the compressed stream. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. inflate() will then process raw deflate data, + not looking for a zlib or gzip header, not generating a check value, and not + looking for any check values for comparison at the end of the stream. This + is for use with other formats that use the deflate compressed data format + such as zip. Those formats provide their own check values. 
If a custom + format is developed using the raw deflate format for compressed data, it is + recommended that a check value such as an Adler-32 or a CRC-32 be applied to + the uncompressed data as is done in the zlib, gzip, and zip formats. For + most applications, the zlib format should be used as is. Note that comments + above on the use in deflateInit2() applies to the magnitude of windowBits. + + windowBits can also be greater than 15 for optional gzip decoding. Add + 32 to windowBits to enable zlib and gzip decoding with automatic header + detection, or add 16 to decode only the gzip format (the zlib format will + return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a + CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see + below), inflate() will not automatically decode concatenated gzip streams. + inflate() will return Z_STREAM_END at the end of the gzip stream. The state + would need to be reset to continue decoding a subsequent gzip stream. + + inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit2 does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit2() does not process any header information -- that is + deferred until inflate() is called. +*/ + +ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the decompression dictionary from the given uncompressed byte + sequence. This function must be called immediately after a call of inflate, + if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + can be determined from the Adler-32 value returned by that call of inflate. + The compressor and decompressor must use exactly the same dictionary (see + deflateSetDictionary). For raw inflate, this function can be called at any + time to set the dictionary. If the provided dictionary is smaller than the + window and there is already data in the window, then the provided dictionary + will amend what's there. The application must insure that the dictionary + that was used for compression is provided. + + inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the + expected one (incorrect Adler-32 value). inflateSetDictionary does not + perform any decompression: this will be done by subsequent calls of + inflate(). +*/ + +ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, + Bytef *dictionary, + uInt *dictLength)); +/* + Returns the sliding dictionary being maintained by inflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If inflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similary, if dictLength is Z_NULL, then it is not set. 
+ + inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + +ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); +/* + Skips invalid compressed data until a possible full flush point (see above + for the description of deflate with Z_FULL_FLUSH) can be found, or until all + available input is skipped. No output is provided. + + inflateSync searches for a 00 00 FF FF pattern in the compressed data. + All full flush points have this pattern, but not all occurrences of this + pattern are full flush points. + + inflateSync returns Z_OK if a possible full flush point has been found, + Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point + has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. + In the success case, the application may save the current current value of + total_in which indicates where valid compressed data was found. In the + error case, the application may repeatedly call inflateSync, providing more + input each time, until success or end of the input data. +*/ + +ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when randomly accessing a large stream. The + first pass through the stream can periodically record the inflate state, + allowing restarting inflate at those points when randomly accessing the + stream. + + inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); +/* + This function is equivalent to inflateEnd followed by inflateInit, + but does not free and reallocate the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. + + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, + int windowBits)); +/* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. If the window size is changed, then the + memory allocated for the window is freed, and the window will be reallocated + by inflate() if needed. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. +*/ + +ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + This function inserts bits in the inflate input stream. The intent is + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. + + If bits is negative, then the input stream bit buffer is emptied. Then + inflatePrime() can be called again to put bits in the buffer. 
This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. + + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); +/* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. + If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. + + inflateMark returns the value noted above, or -65536 if the provided + source stream state was inconsistent. +*/ + +ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, + gz_headerp head)); +/* + inflateGetHeader() requests that gzip header information be stored in the + provided gz_header structure. inflateGetHeader() may be called after + inflateInit2() or inflateReset(), and before the first call of inflate(). + As inflate() processes the gzip stream, head->done is zero until the header + is completed, at which time head->done is set to one. If a zlib stream is + being decoded, then head->done is set to -1 to indicate that there will be + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. + + The text, time, xflags, and os fields are filled in with the gzip header + contents. hcrc is set to true if there is a header CRC. (The header CRC + was valid if done is set to one.) If extra is not Z_NULL, then extra_max + contains the maximum number of bytes to write to extra. Once done is true, + extra_len contains the actual extra field length, and extra contains the + extra field, or that field truncated if extra_max is less than extra_len. + If name is not Z_NULL, then up to name_max characters are written there, + terminated with a zero unless the length is greater than name_max. If + comment is not Z_NULL, then up to comm_max characters are written there, + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its + absence. This allows the use of deflateSetHeader() with the returned + structure to duplicate the header. 
However if those fields are set to + allocated memory, then the application will need to save those pointers + elsewhere so that they can be eventually freed. + + If inflateGetHeader is not used, then the header information is simply + discarded. The header is always checked for validity, including the header + CRC if present. inflateReset() will reset the process to discard the header + information. The application would need to call inflateGetHeader() again to + retrieve the header from the next gzip stream. + + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, + unsigned char FAR *window)); + + Initialize the internal stream state for decompression using inflateBack() + calls. The fields zalloc, zfree and opaque in strm must be initialized + before the call. If zalloc and zfree are Z_NULL, then the default library- + derived memory allocation routines are used. windowBits is the base two + logarithm of the window size, in the range 8..15. window is a caller + supplied buffer of that size. Except for special applications where it is + assured that deflate was used with small window sizes, windowBits must be 15 + and a 32K byte window must be supplied to be able to decompress general + deflate streams. + + See inflateBack() for the usage of these routines. + + inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. +*/ + +typedef unsigned (*in_func) OF((void FAR *, + z_const unsigned char FAR * FAR *)); +typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); + +ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, + in_func in, void FAR *in_desc, + out_func out, void FAR *out_desc)); +/* + inflateBack() does a raw inflate with a single call using a call-back + interface for input and output. This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. + + inflateBackInit() must be called first to allocate the internal state + and to initialize the state with the user-provided window buffer. + inflateBack() may then be used multiple times to inflate a complete, raw + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. + + A raw deflate stream is one with no zlib or gzip header or trailer. + This routine would normally be used in a utility that reads zip or gzip + files and writes out uncompressed files. The utility would decode the + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. This is different from the default + behavior of inflate(), which expects a zlib header and trailer around the + deflate stream. + + inflateBack() uses two subroutines supplied by the caller that are then + called by inflateBack() for input and output. inflateBack() calls those + routines until it reads a complete deflate stream and writes out all of the + uncompressed data, or until it encounters an error. 
The function's
+   parameters and return types are defined above in the in_func and out_func
+   typedefs. inflateBack() will call in(in_desc, &buf) which should return the
+   number of bytes of provided input, and a pointer to that input in buf. If
+   there is no input available, in() must return zero -- buf is ignored in that
+   case -- and inflateBack() will return a buffer error. inflateBack() will
+   call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1].
+   out() should return zero on success, or non-zero on failure. If out()
+   returns non-zero, inflateBack() will return with an error. Neither in() nor
+   out() are permitted to change the contents of the window provided to
+   inflateBackInit(), which is also the buffer that out() uses to write from.
+   The length written by out() will be at most the window size. Any non-zero
+   amount of input may be provided by in().
+
+   For convenience, inflateBack() can be provided input on the first call by
+   setting strm->next_in and strm->avail_in. If that input is exhausted, then
+   in() will be called. Therefore strm->next_in must be initialized before
+   calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called
+   immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in
+   must also be initialized, and then if strm->avail_in is not zero, input will
+   initially be taken from strm->next_in[0 .. strm->avail_in - 1].
+
+   The in_desc and out_desc parameters of inflateBack() are passed as the
+   first parameter of in() and out() respectively when they are called. These
+   descriptors can be optionally used to pass any information that the caller-
+   supplied in() and out() functions need to do their job.
+
+   On return, inflateBack() will set strm->next_in and strm->avail_in to
+   pass back any unused input that was provided by the last in() call. The
+   return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR
+   if in() or out() returned an error, Z_DATA_ERROR if there was a format error
+   in the deflate stream (in which case strm->msg is set to indicate the nature
+   of the error), or Z_STREAM_ERROR if the stream was not properly initialized.
+   In the case of Z_BUF_ERROR, an input or output error can be distinguished
+   using strm->next_in which will be Z_NULL only if in() returned an error. If
+   strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning
+   non-zero. (in() will always be called before out(), so strm->next_in is
+   assured to be defined if out() returns non-zero.) Note that inflateBack()
+   cannot return Z_OK.
+*/
+
+ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm));
+/*
+   All memory allocated by inflateBackInit() is freed.
+
+   inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream
+   state was inconsistent.
+*/
+
+ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void));
+/* Return flags indicating compile-time options.
+ + Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: + 1.0: size of uInt + 3.2: size of uLong + 5.4: size of voidpf (pointer) + 7.6: size of z_off_t + + Compiler, assembler, and debug options: + 8: ZLIB_DEBUG + 9: ASMV or ASMINF -- use ASM code + 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention + 11: 0 (reserved) + + One-time table building (smaller code, but not thread-safe if true): + 12: BUILDFIXED -- build static block decoding tables when needed + 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed + 14,15: 0 (reserved) + + Library content (indicates missing functionality): + 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking + deflate code when not needed) + 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect + and decode gzip streams (to avoid linking crc code) + 18-19: 0 (reserved) + + Operation variations (changes in library functionality): + 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate + 21: FASTEST -- deflate algorithm with only one, lowest compression level + 22,23: 0 (reserved) + + The sprintf variant used by gzprintf (zero is best): + 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format + 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! + 26: 0 = returns value, 1 = void -- 1 means inferred string length returned + + Remainder: + 27-31: 0 (reserved) + */ + +#ifndef Z_SOLO + + /* utility functions */ + +/* + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. +*/ + +ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Compresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed data. compress() is equivalent to compress2() with a level + parameter of Z_DEFAULT_COMPRESSION. + + compress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer. +*/ + +ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen, + int level)); +/* + Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed data. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. +*/ + +ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); +/* + compressBound() returns an upper bound on the compressed size after + compress() or compress2() on sourceLen bytes. It would be used before a + compress() or compress2() call to allocate the destination buffer. 
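+
+     For illustration only (this sketch is not part of the original zlib
+     documentation), a typical buffer-to-buffer use, assuming a caller-owned
+     input buffer src of length srcLen:
+
+       uLong bound = compressBound(srcLen);         /* worst-case output size */
+       Bytef *dst = (Bytef *)malloc(bound);
+       uLongf dstLen = bound;
+       if (dst != Z_NULL && compress(dst, &dstLen, src, srcLen) == Z_OK) {
+           /* dst[0..dstLen-1] now holds the compressed data */
+       }
+       free(dst);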
+*/ + +ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Decompresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. (The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, destLen + is the actual size of the uncompressed data. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. +*/ + +ZEXTERN int ZEXPORT uncompress2 OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong *sourceLen)); +/* + Same as uncompress, except that sourceLen is a pointer, where the + length of the source is *sourceLen. On return, *sourceLen is the number of + source bytes consumed. +*/ + + /* gzip file access functions */ + +/* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. +*/ + +typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ + +/* +ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); + + Opens a gzip (.gz) file for reading or writing. The mode parameter is as + in fopen ("rb" or "wb") but can also include a compression level ("wb9") or + a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only + compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' + for fixed code compression as in "wb9F". (See the description of + deflateInit2 for more information about the strategy parameter.) 'T' will + request transparent writing or appending with no compression and not using + the gzip format. + + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. + + gzopen can be used to read a file which is not in gzip format; in this + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. 
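+
+     As an illustrative sketch only (the file names are hypothetical and not
+     part of the original documentation), a writer might combine these mode
+     letters as follows:
+
+       gzFile out = gzopen("data.gz", "wb9F");     /* level 9, fixed codes */
+       gzFile raw = gzopen("trace.txt", "wT");     /* transparent, no compression */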
+ + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. +*/ + +ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); +/* + gzdopen associates a gzFile with the file descriptor fd. File descriptors + are obtained from calls like open, dup, creat, pipe or fileno (if the file + has been previously opened with fopen). The mode parameter is as in gzopen. + + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). +*/ + +ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); +/* + Set the internal buffer size used by this library's functions. The + default buffer size is 8192 bytes. This function must be called after + gzopen() or gzdopen(), and before any other calls that read or write the + file. The buffer memory allocation is always deferred to the first read or + write. Three times that size in buffer space is allocated. A larger buffer + size of, for example, 64K or 128K bytes will noticeably increase the speed + of decompression (reading). + + The new buffer size also affects the maximum length for gzprintf(). + + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. +*/ + +ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); +/* + Dynamically update the compression level or strategy. See the description + of deflateInit2 for the meaning of these parameters. Previously provided + data is flushed before the parameter change. + + gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not + opened for writing, Z_ERRNO if there is an error writing the flushed data, + or Z_MEM_ERROR if there is a memory allocation error. +*/ + +ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); +/* + Reads the given number of uncompressed bytes from the compressed file. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. + + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). + If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). 
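+
+     A minimal read loop might look as follows (an illustrative sketch with an
+     assumed file name; error handling is omitted):
+
+       gzFile in = gzopen("input.gz", "rb");
+       char buf[8192];
+       int n;
+       while ((n = gzread(in, buf, sizeof(buf))) > 0) {
+           /* consume buf[0..n-1] */
+       }
+       gzclose(in);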
+
+   gzread can be used to read a gzip file that is being concurrently written.
+   Upon reaching the end of the input, gzread will return with the available
+   data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then
+   gzclearerr can be used to clear the end of file indicator in order to permit
+   gzread to be tried again. Z_OK indicates that a gzip stream was completed
+   on the last gzread. Z_BUF_ERROR indicates that the input file ended in the
+   middle of a gzip stream. Note that gzread does not return -1 in the event
+   of an incomplete gzip stream. This error is deferred until gzclose(), which
+   will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip
+   stream. Alternatively, gzerror can be used before gzclose to detect this
+   case.
+
+   gzread returns the number of uncompressed bytes actually read, less than
+   len for end of file, or -1 for error. If len is too large to fit in an int,
+   then nothing is read, -1 is returned, and the error state is set to
+   Z_STREAM_ERROR.
+*/
+
+ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems,
+                                     gzFile file));
+/*
+   Read up to nitems items of size size from file to buf, otherwise operating
+   as gzread() does. This duplicates the interface of stdio's fread(), with
+   size_t request and return types. If the library defines size_t, then
+   z_size_t is identical to size_t. If not, then z_size_t is an unsigned
+   integer type that can contain a pointer.
+
+   gzfread() returns the number of full items read of size size, or zero if
+   the end of the file was reached and a full item could not be read, or if
+   there was an error. gzerror() must be consulted if zero is returned in
+   order to determine if there was an error. If the multiplication of size and
+   nitems overflows, i.e. the product does not fit in a z_size_t, then nothing
+   is read, zero is returned, and the error state is set to Z_STREAM_ERROR.
+
+   In the event that the end of file is reached and only a partial item is
+   available at the end, i.e. the remaining uncompressed data length is not a
+   multiple of size, then the final partial item is nevertheless read into buf
+   and the end-of-file flag is set. The length of the partial item read is not
+   provided, but could be inferred from the result of gztell(). This behavior
+   is the same as the behavior of fread() implementations in common libraries,
+   but it prevents the direct use of gzfread() to read a concurrently written
+   file, resetting and retrying on end-of-file, when size is not 1.
+*/
+
+ZEXTERN int ZEXPORT gzwrite OF((gzFile file,
+                                voidpc buf, unsigned len));
+/*
+   Writes the given number of uncompressed bytes into the compressed file.
+   gzwrite returns the number of uncompressed bytes written or 0 in case of
+   error.
+*/
+
+ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size,
+                                      z_size_t nitems, gzFile file));
+/*
+   gzfwrite() writes nitems items of size size from buf to file, duplicating
+   the interface of stdio's fwrite(), with size_t request and return types. If
+   the library defines size_t, then z_size_t is identical to size_t. If not,
+   then z_size_t is an unsigned integer type that can contain a pointer.
+
+   gzfwrite() returns the number of full items written of size size, or zero
+   if there was an error. If the multiplication of size and nitems overflows,
+   i.e. the product does not fit in a z_size_t, then nothing is written, zero
+   is returned, and the error state is set to Z_STREAM_ERROR.
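+
+   For example (an illustrative sketch, not part of the original
+   documentation), writing an array of fixed-size records and checking for a
+   short write, where struct record stands in for a hypothetical application
+   type:
+
+     struct record recs[100];
+     gzFile out = gzopen("records.gz", "wb");
+     /* ... fill recs ... */
+     if (gzfwrite(recs, sizeof(struct record), 100, out) != 100) {
+         /* write error -- consult gzerror(out, &errnum) */
+     }
+     gzclose(out);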
+*/ + +ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); +/* + Converts, formats, and writes the arguments to the compressed file under + control of the format string, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or a negative zlib error code in case + of error. The number of uncompressed bytes written is limited to 8191, or + one less than the buffer size given to gzbuffer(). The caller should assure + that this limit is not exceeded. If it is exceeded, then gzprintf() will + return an error (0) with nothing written. In this case, there may also be a + buffer overflow with unpredictable consequences, which is possible only if + zlib was compiled with the insecure functions sprintf() or vsprintf() + because the secure snprintf() or vsnprintf() functions were not available. + This can be determined using zlibCompileFlags(). +*/ + +ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); +/* + Writes the given null-terminated string to the compressed file, excluding + the terminating null character. + + gzputs returns the number of characters written, or -1 in case of error. +*/ + +ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); +/* + Reads bytes from the compressed file until len-1 characters are read, or a + newline character is read and transferred to buf, or an end-of-file + condition is encountered. If any characters are read or if len == 1, the + string is terminated with a null character. If no characters are read due + to an end-of-file or len < 1, then the buffer is left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. +*/ + +ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); +/* + Writes c, converted to an unsigned char, into the compressed file. gzputc + returns the value that was written, or -1 in case of error. +*/ + +ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); +/* + Reads one byte from the compressed file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. +*/ + +ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); +/* + Push one character back onto the stream to be read as the first character + on the next read. At least one character of push-back is allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). +*/ + +ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); +/* + Flushes all pending output into the compressed file. The parameter flush + is as in the deflate() function. The return value is the zlib error number + (see function gzerror below). gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. 
gzread() is able to read such + concatenated gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, + z_off_t offset, int whence)); + + Sets the starting position for the next gzread or gzwrite on the given + compressed file. The offset represents a number of bytes in the + uncompressed data stream. The whence parameter is defined as in lseek(2); + the value SEEK_END is not supported. + + If the file is opened for reading, this function is emulated but can be + extremely slow. If the file is opened for writing, only forward seeks are + supported; gzseek then compresses a sequence of zeroes up to the new + starting position. + + gzseek returns the resulting offset location as measured in bytes from + the beginning of the uncompressed stream, or -1 in case of error, in + particular if the file is opened for writing and the new starting position + would be before the current position. +*/ + +ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); +/* + Rewinds the given file. This function is supported only for reading. + + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); + + Returns the starting position for the next gzread or gzwrite on the given + compressed file. This position represents a number of bytes in the + uncompressed data stream, and is zero when starting, even if appending or + reading a gzip stream from the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); + + Returns the current offset in the file being read or written. This offset + includes the count of bytes that precede the gzip stream, for example when + appending or when using gzdopen() for reading. When reading, the offset + does not include as yet unused buffered input. This information can be used + for a progress indicator. On error, gzoffset() returns -1. +*/ + +ZEXTERN int ZEXPORT gzeof OF((gzFile file)); +/* + Returns true (1) if the end-of-file indicator has been set while reading, + false (0) otherwise. Note that the end-of-file indicator is set only if the + read tried to go past the end of the input, but came up short. Therefore, + just like feof(), gzeof() may return false even if there is no more data to + read, in the event that the last read request was for the exact number of + bytes remaining in the input file. This will happen if the input file size + is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. +*/ + +ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); +/* + Returns true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). + + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. 
(Note:
+   gzdirect() is not needed when writing. Transparent writing must be
+   explicitly requested, so the application already knows the answer. When
+   linking statically, using gzdirect() will include all of the zlib code for
+   gzip file reading and decompression, which may not be desired.)
+*/
+
+ZEXTERN int ZEXPORT gzclose OF((gzFile file));
+/*
+   Flushes all pending output if necessary, closes the compressed file and
+   deallocates the (de)compression state. Note that once file is closed, you
+   cannot call gzerror with file, since its structures have been deallocated.
+   gzclose must not be called more than once on the same file, just as free
+   must not be called more than once on the same allocation.
+
+   gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a
+   file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the
+   last read ended in the middle of a gzip stream, or Z_OK on success.
+*/
+
+ZEXTERN int ZEXPORT gzclose_r OF((gzFile file));
+ZEXTERN int ZEXPORT gzclose_w OF((gzFile file));
+/*
+   Same as gzclose(), but gzclose_r() is only for use when reading, and
+   gzclose_w() is only for use when writing or appending. The advantage to
+   using these instead of gzclose() is that they avoid linking in zlib
+   compression or decompression code that is not used when only reading or only
+   writing respectively. If gzclose() is used, then both compression and
+   decompression code will be included in the application when linking to a
+   static zlib library.
+*/
+
+ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum));
+/*
+   Returns the error message for the last error which occurred on the given
+   compressed file. errnum is set to the zlib error number. If an error
+   occurred in the file system and not in the compression library, errnum is
+   set to Z_ERRNO and the application may consult errno to get the exact error
+   code.
+
+   The application must not modify the returned string. Future calls to
+   this function may invalidate the previously returned string. If file is
+   closed, then the string previously returned by gzerror will no longer be
+   available.
+
+   gzerror() should be used to distinguish errors from end-of-file for those
+   functions above that do not distinguish those cases in their return values.
+*/
+
+ZEXTERN void ZEXPORT gzclearerr OF((gzFile file));
+/*
+   Clears the error and end-of-file flags for file. This is analogous to the
+   clearerr() function in stdio. This is useful for continuing to read a gzip
+   file that is being written concurrently.
+*/
+
+#endif /* !Z_SOLO */
+
+                        /* checksum functions */
+
+/*
+   These functions are not related to compression but are exported
+   anyway because they might be useful in applications using the compression
+   library.
+*/
+
+ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
+/*
+   Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+   return the updated checksum. If buf is Z_NULL, this function returns the
+   required initial value for the checksum.
+
+   An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed
+   much faster.
+
+   Usage example:
+
+     uLong adler = adler32(0L, Z_NULL, 0);
+
+     while (read_buffer(buffer, length) != EOF) {
+       adler = adler32(adler, buffer, length);
+     }
+     if (adler != original_adler) error();
+*/
+
+ZEXTERN uLong ZEXPORT adler32_z OF((uLong adler, const Bytef *buf,
+                                    z_size_t len));
+/*
+   Same as adler32(), but with a size_t length.
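+
+   For example (illustrative only), checksumming a complete in-memory buffer
+   buf of z_size_t length len in one call:
+
+     uLong adler = adler32_z(adler32(0L, Z_NULL, 0), buf, len);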
+*/ + +/* +ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, + z_off_t len2)); + + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 + and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for + each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. +*/ + +ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); +/* + Update a running CRC-32 with the bytes buf[0..len-1] and return the + updated CRC-32. If buf is Z_NULL, this function returns the required + initial value for the crc. Pre- and post-conditioning (one's complement) is + performed within this function so it shouldn't be done by the application. + + Usage example: + + uLong crc = crc32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + crc = crc32(crc, buffer, length); + } + if (crc != original_crc) error(); +*/ + +ZEXTERN uLong ZEXPORT crc32_z OF((uLong adler, const Bytef *buf, + z_size_t len)); +/* + Same as crc32(), but with a size_t length. +*/ + +/* +ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); + + Combine two CRC-32 check values into one. For two sequences of bytes, + seq1 and seq2 with lengths len1 and len2, CRC-32 check values were + calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 + check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and + len2. +*/ + + + /* various hacks, don't look :) */ + +/* deflateInit and inflateInit are macros to allow checking the zlib version + * and the compiler's view of z_stream: + */ +ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, + int windowBits, int memLevel, + int strategy, const char *version, + int stream_size)); +ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, + unsigned char FAR *window, + const char *version, + int stream_size)); +#ifdef Z_PREFIX_SET +# define z_deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +# define z_inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + ZLIB_VERSION, (int)sizeof(z_stream)) +#else +# define deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +# define inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +# define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +# define inflateInit2(strm, windowBits) \ + 
inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +# define inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + ZLIB_VERSION, (int)sizeof(z_stream)) +#endif + +#ifndef Z_SOLO + +/* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ +struct gzFile_s { + unsigned have; + unsigned char *next; + z_off64_t pos; +}; +ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +# define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#else +# define gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#endif + +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +# ifdef Z_PREFIX_SET +# define z_gzopen z_gzopen64 +# define z_gzseek z_gzseek64 +# define z_gztell z_gztell64 +# define z_gzoffset z_gzoffset64 +# define z_adler32_combine z_adler32_combine64 +# define z_crc32_combine z_crc32_combine64 +# else +# define gzopen gzopen64 +# define gzseek gzseek64 +# define gztell gztell64 +# define gzoffset gzoffset64 +# define adler32_combine adler32_combine64 +# define crc32_combine crc32_combine64 +# endif +# ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +# endif +#else + ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); +#endif + +#else /* Z_SOLO */ + + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); + +#endif /* !Z_SOLO */ + +/* undocumented functions */ +ZEXTERN const char * ZEXPORT zError OF((int)); +ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); +ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); +ZEXTERN 
int ZEXPORT inflateUndermine OF((z_streamp, int)); +ZEXTERN int ZEXPORT inflateValidate OF((z_streamp, int)); +ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF ((z_streamp)); +ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); +ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); +#if (defined(_WIN32) || defined(__CYGWIN__)) && !defined(Z_SOLO) +ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, + const char *mode)); +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, + const char *format, + va_list va)); +# endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ZLIB_H */ diff --git a/deps/zlib-1.2.11/CMakeLists.txt b/deps/zlib-1.2.11/CMakeLists.txt new file mode 100644 index 000000000000..704a24204589 --- /dev/null +++ b/deps/zlib-1.2.11/CMakeLists.txt @@ -0,0 +1,8 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc) + +ADD_LIBRARY(z ${SRC}) diff --git a/deps/zlib-1.2.11/src/Makefile b/deps/zlib-1.2.11/src/Makefile new file mode 100644 index 000000000000..82bf40727c57 --- /dev/null +++ b/deps/zlib-1.2.11/src/Makefile @@ -0,0 +1,410 @@ +# Makefile for zlib +# Copyright (C) 1995-2017 Jean-loup Gailly, Mark Adler +# For conditions of distribution and use, see copyright notice in zlib.h + +# To compile and test, type: +# ./configure; make test +# Normally configure builds both a static and a shared library. +# If you want to build just a static library, use: ./configure --static + +# To use the asm code, type: +# cp contrib/asm?86/match.S ./match.S +# make LOC=-DASMV OBJA=match.o + +# To install /usr/local/lib/libz.* and /usr/local/include/zlib.h, type: +# make install +# To install in $HOME instead of /usr/local, use: +# make install prefix=$HOME + +CC=cc + +CFLAGS=-O2 -fomit-frame-pointer -pipe -D_LARGEFILE64_SOURCE=1 -DHAVE_HIDDEN +#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 +#CFLAGS=-g -DZLIB_DEBUG +#CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ +# -Wstrict-prototypes -Wmissing-prototypes + +SFLAGS=-O2 -fomit-frame-pointer -pipe -fPIC -D_LARGEFILE64_SOURCE=1 -DHAVE_HIDDEN +LDFLAGS= +TEST_LDFLAGS=-L. libz.a +LDSHARED=cc -shared -Wl,-soname,libz.so.1,--version-script,zlib.map +CPP=cc -E + +STATICLIB=libz.a +SHAREDLIB=libz.so +SHAREDLIBV=libz.so.1.2.11 +SHAREDLIBM=libz.so.1 +LIBS=$(STATICLIB) $(SHAREDLIBV) + +AR=ar +ARFLAGS=rc +RANLIB=ranlib +LDCONFIG=ldconfig +LDSHAREDLIBC=-lc +TAR=tar +SHELL=/bin/sh +EXE= + +prefix =/usr/local +exec_prefix =${prefix} +libdir =${exec_prefix}/lib +sharedlibdir =${libdir} +includedir =${prefix}/include +mandir =${prefix}/share/man +man3dir = ${mandir}/man3 +pkgconfigdir = ${libdir}/pkgconfig +SRCDIR= +ZINC= +ZINCOUT=-I. 
+ +OBJZ = adler32.o crc32.o deflate.o infback.o inffast.o inflate.o inftrees.o trees.o zutil.o +OBJG = compress.o uncompr.o gzclose.o gzlib.o gzread.o gzwrite.o +OBJC = $(OBJZ) $(OBJG) + +PIC_OBJZ = adler32.lo crc32.lo deflate.lo infback.lo inffast.lo inflate.lo inftrees.lo trees.lo zutil.lo +PIC_OBJG = compress.lo uncompr.lo gzclose.lo gzlib.lo gzread.lo gzwrite.lo +PIC_OBJC = $(PIC_OBJZ) $(PIC_OBJG) + +# to use the asm code: make OBJA=match.o, PIC_OBJA=match.lo +OBJA = +PIC_OBJA = + +OBJS = $(OBJC) $(OBJA) + +PIC_OBJS = $(PIC_OBJC) $(PIC_OBJA) + +all: static shared all64 + +static: example$(EXE) minigzip$(EXE) + +shared: examplesh$(EXE) minigzipsh$(EXE) + +all64: example64$(EXE) minigzip64$(EXE) + +check: test + +test: all teststatic testshared test64 + +teststatic: static + @TMPST=tmpst_$$; \ + if echo hello world | ./minigzip | ./minigzip -d && ./example $$TMPST ; then \ + echo ' *** zlib test OK ***'; \ + else \ + echo ' *** zlib test FAILED ***'; false; \ + fi; \ + rm -f $$TMPST + +testshared: shared + @LD_LIBRARY_PATH=`pwd`:$(LD_LIBRARY_PATH) ; export LD_LIBRARY_PATH; \ + LD_LIBRARYN32_PATH=`pwd`:$(LD_LIBRARYN32_PATH) ; export LD_LIBRARYN32_PATH; \ + DYLD_LIBRARY_PATH=`pwd`:$(DYLD_LIBRARY_PATH) ; export DYLD_LIBRARY_PATH; \ + SHLIB_PATH=`pwd`:$(SHLIB_PATH) ; export SHLIB_PATH; \ + TMPSH=tmpsh_$$; \ + if echo hello world | ./minigzipsh | ./minigzipsh -d && ./examplesh $$TMPSH; then \ + echo ' *** zlib shared test OK ***'; \ + else \ + echo ' *** zlib shared test FAILED ***'; false; \ + fi; \ + rm -f $$TMPSH + +test64: all64 + @TMP64=tmp64_$$; \ + if echo hello world | ./minigzip64 | ./minigzip64 -d && ./example64 $$TMP64; then \ + echo ' *** zlib 64-bit test OK ***'; \ + else \ + echo ' *** zlib 64-bit test FAILED ***'; false; \ + fi; \ + rm -f $$TMP64 + +infcover.o: $(SRCDIR)test/infcover.c $(SRCDIR)zlib.h zconf.h + $(CC) $(CFLAGS) $(ZINCOUT) -c -o $@ $(SRCDIR)test/infcover.c + +infcover: infcover.o libz.a + $(CC) $(CFLAGS) -o $@ infcover.o libz.a + +cover: infcover + rm -f *.gcda + ./infcover + gcov inf*.c + +libz.a: $(OBJS) + $(AR) $(ARFLAGS) $@ $(OBJS) + -@ ($(RANLIB) $@ || true) >/dev/null 2>&1 + +match.o: match.S + $(CPP) match.S > _match.s + $(CC) -c _match.s + mv _match.o match.o + rm -f _match.s + +match.lo: match.S + $(CPP) match.S > _match.s + $(CC) -c -fPIC _match.s + mv _match.o match.lo + rm -f _match.s + +example.o: $(SRCDIR)test/example.c $(SRCDIR)zlib.h zconf.h + $(CC) $(CFLAGS) $(ZINCOUT) -c -o $@ $(SRCDIR)test/example.c + +minigzip.o: $(SRCDIR)test/minigzip.c $(SRCDIR)zlib.h zconf.h + $(CC) $(CFLAGS) $(ZINCOUT) -c -o $@ $(SRCDIR)test/minigzip.c + +example64.o: $(SRCDIR)test/example.c $(SRCDIR)zlib.h zconf.h + $(CC) $(CFLAGS) $(ZINCOUT) -D_FILE_OFFSET_BITS=64 -c -o $@ $(SRCDIR)test/example.c + +minigzip64.o: $(SRCDIR)test/minigzip.c $(SRCDIR)zlib.h zconf.h + $(CC) $(CFLAGS) $(ZINCOUT) -D_FILE_OFFSET_BITS=64 -c -o $@ $(SRCDIR)test/minigzip.c + + +adler32.o: $(SRCDIR)adler32.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)adler32.c + +crc32.o: $(SRCDIR)crc32.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)crc32.c + +deflate.o: $(SRCDIR)deflate.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)deflate.c + +infback.o: $(SRCDIR)infback.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)infback.c + +inffast.o: $(SRCDIR)inffast.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)inffast.c + +inflate.o: $(SRCDIR)inflate.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)inflate.c + +inftrees.o: $(SRCDIR)inftrees.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)inftrees.c + +trees.o: $(SRCDIR)trees.c 
+ $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)trees.c + +zutil.o: $(SRCDIR)zutil.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)zutil.c + +compress.o: $(SRCDIR)compress.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)compress.c + +uncompr.o: $(SRCDIR)uncompr.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)uncompr.c + +gzclose.o: $(SRCDIR)gzclose.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)gzclose.c + +gzlib.o: $(SRCDIR)gzlib.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)gzlib.c + +gzread.o: $(SRCDIR)gzread.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)gzread.c + +gzwrite.o: $(SRCDIR)gzwrite.c + $(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)gzwrite.c + + +adler32.lo: $(SRCDIR)adler32.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/adler32.o $(SRCDIR)adler32.c + -@mv objs/adler32.o $@ + +crc32.lo: $(SRCDIR)crc32.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/crc32.o $(SRCDIR)crc32.c + -@mv objs/crc32.o $@ + +deflate.lo: $(SRCDIR)deflate.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/deflate.o $(SRCDIR)deflate.c + -@mv objs/deflate.o $@ + +infback.lo: $(SRCDIR)infback.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/infback.o $(SRCDIR)infback.c + -@mv objs/infback.o $@ + +inffast.lo: $(SRCDIR)inffast.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/inffast.o $(SRCDIR)inffast.c + -@mv objs/inffast.o $@ + +inflate.lo: $(SRCDIR)inflate.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/inflate.o $(SRCDIR)inflate.c + -@mv objs/inflate.o $@ + +inftrees.lo: $(SRCDIR)inftrees.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/inftrees.o $(SRCDIR)inftrees.c + -@mv objs/inftrees.o $@ + +trees.lo: $(SRCDIR)trees.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/trees.o $(SRCDIR)trees.c + -@mv objs/trees.o $@ + +zutil.lo: $(SRCDIR)zutil.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/zutil.o $(SRCDIR)zutil.c + -@mv objs/zutil.o $@ + +compress.lo: $(SRCDIR)compress.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/compress.o $(SRCDIR)compress.c + -@mv objs/compress.o $@ + +uncompr.lo: $(SRCDIR)uncompr.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/uncompr.o $(SRCDIR)uncompr.c + -@mv objs/uncompr.o $@ + +gzclose.lo: $(SRCDIR)gzclose.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/gzclose.o $(SRCDIR)gzclose.c + -@mv objs/gzclose.o $@ + +gzlib.lo: $(SRCDIR)gzlib.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/gzlib.o $(SRCDIR)gzlib.c + -@mv objs/gzlib.o $@ + +gzread.lo: $(SRCDIR)gzread.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/gzread.o $(SRCDIR)gzread.c + -@mv objs/gzread.o $@ + +gzwrite.lo: $(SRCDIR)gzwrite.c + -@mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/gzwrite.o $(SRCDIR)gzwrite.c + -@mv objs/gzwrite.o $@ + + +placebo $(SHAREDLIBV): $(PIC_OBJS) libz.a + $(LDSHARED) $(SFLAGS) -o $@ $(PIC_OBJS) $(LDSHAREDLIBC) $(LDFLAGS) + rm -f $(SHAREDLIB) $(SHAREDLIBM) + ln -s $@ $(SHAREDLIB) + ln -s $@ $(SHAREDLIBM) + -@rmdir objs + +example$(EXE): example.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ example.o $(TEST_LDFLAGS) + +minigzip$(EXE): 
minigzip.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ minigzip.o $(TEST_LDFLAGS) + +examplesh$(EXE): example.o $(SHAREDLIBV) + $(CC) $(CFLAGS) -o $@ example.o -L. $(SHAREDLIBV) + +minigzipsh$(EXE): minigzip.o $(SHAREDLIBV) + $(CC) $(CFLAGS) -o $@ minigzip.o -L. $(SHAREDLIBV) + +example64$(EXE): example64.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ example64.o $(TEST_LDFLAGS) + +minigzip64$(EXE): minigzip64.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ minigzip64.o $(TEST_LDFLAGS) + +install-libs: $(LIBS) + -@if [ ! -d $(DESTDIR)$(exec_prefix) ]; then mkdir -p $(DESTDIR)$(exec_prefix); fi + -@if [ ! -d $(DESTDIR)$(libdir) ]; then mkdir -p $(DESTDIR)$(libdir); fi + -@if [ ! -d $(DESTDIR)$(sharedlibdir) ]; then mkdir -p $(DESTDIR)$(sharedlibdir); fi + -@if [ ! -d $(DESTDIR)$(man3dir) ]; then mkdir -p $(DESTDIR)$(man3dir); fi + -@if [ ! -d $(DESTDIR)$(pkgconfigdir) ]; then mkdir -p $(DESTDIR)$(pkgconfigdir); fi + rm -f $(DESTDIR)$(libdir)/$(STATICLIB) + cp $(STATICLIB) $(DESTDIR)$(libdir) + chmod 644 $(DESTDIR)$(libdir)/$(STATICLIB) + -@($(RANLIB) $(DESTDIR)$(libdir)/libz.a || true) >/dev/null 2>&1 + -@if test -n "$(SHAREDLIBV)"; then \ + rm -f $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBV); \ + cp $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir); \ + echo "cp $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)"; \ + chmod 755 $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBV); \ + echo "chmod 755 $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBV)"; \ + rm -f $(DESTDIR)$(sharedlibdir)/$(SHAREDLIB) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBM); \ + ln -s $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIB); \ + ln -s $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBM); \ + ($(LDCONFIG) || true) >/dev/null 2>&1; \ + fi + rm -f $(DESTDIR)$(man3dir)/zlib.3 + cp $(SRCDIR)zlib.3 $(DESTDIR)$(man3dir) + chmod 644 $(DESTDIR)$(man3dir)/zlib.3 + rm -f $(DESTDIR)$(pkgconfigdir)/zlib.pc + cp zlib.pc $(DESTDIR)$(pkgconfigdir) + chmod 644 $(DESTDIR)$(pkgconfigdir)/zlib.pc +# The ranlib in install is needed on NeXTSTEP which checks file times +# ldconfig is for Linux + +install: install-libs + -@if [ ! 
-d $(DESTDIR)$(includedir) ]; then mkdir -p $(DESTDIR)$(includedir); fi + rm -f $(DESTDIR)$(includedir)/zlib.h $(DESTDIR)$(includedir)/zconf.h + cp $(SRCDIR)zlib.h zconf.h $(DESTDIR)$(includedir) + chmod 644 $(DESTDIR)$(includedir)/zlib.h $(DESTDIR)$(includedir)/zconf.h + +uninstall: + cd $(DESTDIR)$(includedir) && rm -f zlib.h zconf.h + cd $(DESTDIR)$(libdir) && rm -f libz.a; \ + if test -n "$(SHAREDLIBV)" -a -f $(SHAREDLIBV); then \ + rm -f $(SHAREDLIBV) $(SHAREDLIB) $(SHAREDLIBM); \ + fi + cd $(DESTDIR)$(man3dir) && rm -f zlib.3 + cd $(DESTDIR)$(pkgconfigdir) && rm -f zlib.pc + +docs: zlib.3.pdf + +zlib.3.pdf: $(SRCDIR)zlib.3 + groff -mandoc -f H -T ps $(SRCDIR)zlib.3 | ps2pdf - $@ + +zconf.h.cmakein: $(SRCDIR)zconf.h.in + -@ TEMPFILE=zconfh_$$; \ + echo "/#define ZCONF_H/ a\\\\\n#cmakedefine Z_PREFIX\\\\\n#cmakedefine Z_HAVE_UNISTD_H\n" >> $$TEMPFILE &&\ + sed -f $$TEMPFILE $(SRCDIR)zconf.h.in > $@ &&\ + touch -r $(SRCDIR)zconf.h.in $@ &&\ + rm $$TEMPFILE + +zconf: $(SRCDIR)zconf.h.in + cp -p $(SRCDIR)zconf.h.in zconf.h + +mostlyclean: clean +clean: + rm -f *.o *.lo *~ \ + example$(EXE) minigzip$(EXE) examplesh$(EXE) minigzipsh$(EXE) \ + example64$(EXE) minigzip64$(EXE) \ + infcover \ + libz.* foo.gz so_locations \ + _match.s maketree contrib/infback9/*.o + rm -rf objs + rm -f *.gcda *.gcno *.gcov + rm -f contrib/infback9/*.gcda contrib/infback9/*.gcno contrib/infback9/*.gcov + +maintainer-clean: distclean +distclean: clean zconf zconf.h.cmakein docs + rm -f Makefile zlib.pc configure.log + -@rm -f .DS_Store + @if [ -f Makefile.in ]; then \ + printf 'all:\n\t-@echo "Please use ./configure first. Thank you."\n' > Makefile ; \ + printf '\ndistclean:\n\tmake -f Makefile.in distclean\n' >> Makefile ; \ + touch -r $(SRCDIR)Makefile.in Makefile ; fi + @if [ ! -f zconf.h.in ]; then rm -f zconf.h zconf.h.cmakein ; fi + @if [ ! 
-f zlib.3 ]; then rm -f zlib.3.pdf ; fi + +tags: + etags $(SRCDIR)*.[ch] + +adler32.o zutil.o: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h +gzclose.o gzlib.o gzread.o gzwrite.o: $(SRCDIR)zlib.h zconf.h $(SRCDIR)gzguts.h +compress.o example.o minigzip.o uncompr.o: $(SRCDIR)zlib.h zconf.h +crc32.o: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)crc32.h +deflate.o: $(SRCDIR)deflate.h $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h +infback.o inflate.o: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)inftrees.h $(SRCDIR)inflate.h $(SRCDIR)inffast.h $(SRCDIR)inffixed.h +inffast.o: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)inftrees.h $(SRCDIR)inflate.h $(SRCDIR)inffast.h +inftrees.o: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)inftrees.h +trees.o: $(SRCDIR)deflate.h $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)trees.h + +adler32.lo zutil.lo: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h +gzclose.lo gzlib.lo gzread.lo gzwrite.lo: $(SRCDIR)zlib.h zconf.h $(SRCDIR)gzguts.h +compress.lo example.lo minigzip.lo uncompr.lo: $(SRCDIR)zlib.h zconf.h +crc32.lo: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)crc32.h +deflate.lo: $(SRCDIR)deflate.h $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h +infback.lo inflate.lo: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)inftrees.h $(SRCDIR)inflate.h $(SRCDIR)inffast.h $(SRCDIR)inffixed.h +inffast.lo: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)inftrees.h $(SRCDIR)inflate.h $(SRCDIR)inffast.h +inftrees.lo: $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)inftrees.h +trees.lo: $(SRCDIR)deflate.h $(SRCDIR)zutil.h $(SRCDIR)zlib.h zconf.h $(SRCDIR)trees.h diff --git a/deps/zlib-1.2.11/src/adler32.c b/deps/zlib-1.2.11/src/adler32.c new file mode 100644 index 000000000000..d0be4380a39c --- /dev/null +++ b/deps/zlib-1.2.11/src/adler32.c @@ -0,0 +1,186 @@ +/* adler32.c -- compute the Adler-32 checksum of a data stream + * Copyright (C) 1995-2011, 2016 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#include "zutil.h" + +local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2)); + +#define BASE 65521U /* largest prime smaller than 65536 */ +#define NMAX 5552 +/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ + +#define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;} +#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); +#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); +#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); +#define DO16(buf) DO8(buf,0); DO8(buf,8); + +/* use NO_DIVIDE if your processor does not do division in hardware -- + try it both ways to see which is faster */ +#ifdef NO_DIVIDE +/* note that this assumes BASE is 65521, where 65536 % 65521 == 15 + (thank you to John Reiser for pointing this out) */ +# define CHOP(a) \ + do { \ + unsigned long tmp = a >> 16; \ + a &= 0xffffUL; \ + a += (tmp << 4) - tmp; \ + } while (0) +# define MOD28(a) \ + do { \ + CHOP(a); \ + if (a >= BASE) a -= BASE; \ + } while (0) +# define MOD(a) \ + do { \ + CHOP(a); \ + MOD28(a); \ + } while (0) +# define MOD63(a) \ + do { /* this assumes a is not negative */ \ + z_off64_t tmp = a >> 32; \ + a &= 0xffffffffL; \ + a += (tmp << 8) - (tmp << 5) + tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + if (a >= BASE) a -= BASE; \ + } while (0) +#else +# define MOD(a) a %= BASE +# define MOD28(a) a %= BASE +# define MOD63(a) a %= BASE +#endif + +/* 
========================================================================= */ +uLong ZEXPORT adler32_z(adler, buf, len) + uLong adler; + const Bytef *buf; + z_size_t len; +{ + unsigned long sum2; + unsigned n; + + /* split Adler-32 into component sums */ + sum2 = (adler >> 16) & 0xffff; + adler &= 0xffff; + + /* in case user likes doing a byte at a time, keep it fast */ + if (len == 1) { + adler += buf[0]; + if (adler >= BASE) + adler -= BASE; + sum2 += adler; + if (sum2 >= BASE) + sum2 -= BASE; + return adler | (sum2 << 16); + } + + /* initial Adler-32 value (deferred check for len == 1 speed) */ + if (buf == Z_NULL) + return 1L; + + /* in case short lengths are provided, keep it somewhat fast */ + if (len < 16) { + while (len--) { + adler += *buf++; + sum2 += adler; + } + if (adler >= BASE) + adler -= BASE; + MOD28(sum2); /* only added so many BASE's */ + return adler | (sum2 << 16); + } + + /* do length NMAX blocks -- requires just one modulo operation */ + while (len >= NMAX) { + len -= NMAX; + n = NMAX / 16; /* NMAX is divisible by 16 */ + do { + DO16(buf); /* 16 sums unrolled */ + buf += 16; + } while (--n); + MOD(adler); + MOD(sum2); + } + + /* do remaining bytes (less than NMAX, still just one modulo) */ + if (len) { /* avoid modulos if none remaining */ + while (len >= 16) { + len -= 16; + DO16(buf); + buf += 16; + } + while (len--) { + adler += *buf++; + sum2 += adler; + } + MOD(adler); + MOD(sum2); + } + + /* return recombined sums */ + return adler | (sum2 << 16); +} + +/* ========================================================================= */ +uLong ZEXPORT adler32(adler, buf, len) + uLong adler; + const Bytef *buf; + uInt len; +{ + return adler32_z(adler, buf, len); +} + +/* ========================================================================= */ +local uLong adler32_combine_(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + unsigned long sum1; + unsigned long sum2; + unsigned rem; + + /* for negative len, return invalid adler32 as a clue for debugging */ + if (len2 < 0) + return 0xffffffffUL; + + /* the derivation of this formula is left as an exercise for the reader */ + MOD63(len2); /* assumes len2 >= 0 */ + rem = (unsigned)len2; + sum1 = adler1 & 0xffff; + sum2 = rem * sum1; + MOD(sum2); + sum1 += (adler2 & 0xffff) + BASE - 1; + sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; + if (sum1 >= BASE) sum1 -= BASE; + if (sum1 >= BASE) sum1 -= BASE; + if (sum2 >= ((unsigned long)BASE << 1)) sum2 -= ((unsigned long)BASE << 1); + if (sum2 >= BASE) sum2 -= BASE; + return sum1 | (sum2 << 16); +} + +/* ========================================================================= */ +uLong ZEXPORT adler32_combine(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} + +uLong ZEXPORT adler32_combine64(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + return adler32_combine_(adler1, adler2, len2); +} diff --git a/deps/zlib-1.2.11/src/compress.c b/deps/zlib-1.2.11/src/compress.c new file mode 100644 index 000000000000..e2db404abf88 --- /dev/null +++ b/deps/zlib-1.2.11/src/compress.c @@ -0,0 +1,86 @@ +/* compress.c -- compress a memory buffer + * Copyright (C) 1995-2005, 2014, 2016 Jean-loup Gailly, Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#define ZLIB_INTERNAL +#include "zlib.h" + +/* =========================================================================== + 
Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least 0.1% larger than sourceLen plus + 12 bytes. Upon exit, destLen is the actual size of the compressed buffer. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. +*/ +int ZEXPORT compress2 (dest, destLen, source, sourceLen, level) + Bytef *dest; + uLongf *destLen; + const Bytef *source; + uLong sourceLen; + int level; +{ + z_stream stream; + int err; + const uInt max = (uInt)-1; + uLong left; + + left = *destLen; + *destLen = 0; + + stream.zalloc = (alloc_func)0; + stream.zfree = (free_func)0; + stream.opaque = (voidpf)0; + + err = deflateInit(&stream, level); + if (err != Z_OK) return err; + + stream.next_out = dest; + stream.avail_out = 0; + stream.next_in = (z_const Bytef *)source; + stream.avail_in = 0; + + do { + if (stream.avail_out == 0) { + stream.avail_out = left > (uLong)max ? max : (uInt)left; + left -= stream.avail_out; + } + if (stream.avail_in == 0) { + stream.avail_in = sourceLen > (uLong)max ? max : (uInt)sourceLen; + sourceLen -= stream.avail_in; + } + err = deflate(&stream, sourceLen ? Z_NO_FLUSH : Z_FINISH); + } while (err == Z_OK); + + *destLen = stream.total_out; + deflateEnd(&stream); + return err == Z_STREAM_END ? Z_OK : err; +} + +/* =========================================================================== + */ +int ZEXPORT compress (dest, destLen, source, sourceLen) + Bytef *dest; + uLongf *destLen; + const Bytef *source; + uLong sourceLen; +{ + return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION); +} + +/* =========================================================================== + If the default memLevel or windowBits for deflateInit() is changed, then + this function needs to be updated. + */ +uLong ZEXPORT compressBound (sourceLen) + uLong sourceLen; +{ + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13; +} diff --git a/deps/zlib-1.2.11/src/crc32.c b/deps/zlib-1.2.11/src/crc32.c new file mode 100644 index 000000000000..9580440c0e6b --- /dev/null +++ b/deps/zlib-1.2.11/src/crc32.c @@ -0,0 +1,442 @@ +/* crc32.c -- compute the CRC-32 of a data stream + * Copyright (C) 1995-2006, 2010, 2011, 2012, 2016 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + * + * Thanks to Rodney Brown for his contribution of faster + * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing + * tables for updating the shift register in one step with three exclusive-ors + * instead of four steps with four exclusive-ors. This results in about a + * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3. + */ + +/* @(#) $Id$ */ + +/* + Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore + protection on the static variables used to control the first-use generation + of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should + first call get_crc_table() to initialize the tables before allowing more than + one thread to use crc32(). + + DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h. 
+ */
+
+#ifdef MAKECRCH
+#  include <stdio.h>
+#  ifndef DYNAMIC_CRC_TABLE
+#    define DYNAMIC_CRC_TABLE
+#  endif /* !DYNAMIC_CRC_TABLE */
+#endif /* MAKECRCH */
+
+#include "zutil.h" /* for STDC and FAR definitions */
+
+/* Definitions for doing the crc four data bytes at a time. */
+#if !defined(NOBYFOUR) && defined(Z_U4)
+#  define BYFOUR
+#endif
+#ifdef BYFOUR
+   local unsigned long crc32_little OF((unsigned long,
+                        const unsigned char FAR *, z_size_t));
+   local unsigned long crc32_big OF((unsigned long,
+                        const unsigned char FAR *, z_size_t));
+#  define TBLS 8
+#else
+#  define TBLS 1
+#endif /* BYFOUR */
+
+/* Local functions for crc concatenation */
+local unsigned long gf2_matrix_times OF((unsigned long *mat,
+                                         unsigned long vec));
+local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat));
+local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2));
+
+
+#ifdef DYNAMIC_CRC_TABLE
+
+local volatile int crc_table_empty = 1;
+local z_crc_t FAR crc_table[TBLS][256];
+local void make_crc_table OF((void));
+#ifdef MAKECRCH
+   local void write_table OF((FILE *, const z_crc_t FAR *));
+#endif /* MAKECRCH */
+/*
+  Generate tables for a byte-wise 32-bit CRC calculation on the polynomial:
+  x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1.
+
+  Polynomials over GF(2) are represented in binary, one bit per coefficient,
+  with the lowest powers in the most significant bit. Then adding polynomials
+  is just exclusive-or, and multiplying a polynomial by x is a right shift by
+  one. If we call the above polynomial p, and represent a byte as the
+  polynomial q, also with the lowest power in the most significant bit (so the
+  byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p,
+  where a mod b means the remainder after dividing a by b.
+
+  This calculation is done using the shift-register method of multiplying and
+  taking the remainder. The register is initialized to zero, and for each
+  incoming bit, x^32 is added mod p to the register if the bit is a one (where
+  x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by
+  x (which is shifting right by one and adding x^32 mod p if the bit shifted
+  out is a one). We start with the highest power (least significant bit) of
+  q and repeat for all eight bits of q.
+
+  The first table is simply the CRC of all possible eight bit values. This is
+  all the information needed to generate CRCs on data a byte at a time for all
+  combinations of CRC register values and incoming bytes. The remaining tables
+  allow for word-at-a-time CRC calculation for both big-endian and little-
+  endian machines, where a word is four bytes.
+*/
+local void make_crc_table()
+{
+    z_crc_t c;
+    int n, k;
+    z_crc_t poly;                       /* polynomial exclusive-or pattern */
+    /* terms of polynomial defining this crc (except x^32): */
+    static volatile int first = 1;      /* flag to limit concurrent making */
+    static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
+
+    /* See if another task is already doing this (not thread-safe, but better
+       than nothing -- significantly reduces duration of vulnerability in
+       case the advice about DYNAMIC_CRC_TABLE is ignored) */
+    if (first) {
+        first = 0;
+
+        /* make exclusive-or pattern from polynomial (0xedb88320UL) */
+        poly = 0;
+        for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++)
+            poly |= (z_crc_t)1 << (31 - p[n]);
+
+        /* generate a crc for every 8-bit value */
+        for (n = 0; n < 256; n++) {
+            c = (z_crc_t)n;
+            for (k = 0; k < 8; k++)
+                c = c & 1 ?
poly ^ (c >> 1) : c >> 1; + crc_table[0][n] = c; + } + +#ifdef BYFOUR + /* generate crc for each value followed by one, two, and three zeros, + and then the byte reversal of those as well as the first table */ + for (n = 0; n < 256; n++) { + c = crc_table[0][n]; + crc_table[4][n] = ZSWAP32(c); + for (k = 1; k < 4; k++) { + c = crc_table[0][c & 0xff] ^ (c >> 8); + crc_table[k][n] = c; + crc_table[k + 4][n] = ZSWAP32(c); + } + } +#endif /* BYFOUR */ + + crc_table_empty = 0; + } + else { /* not first */ + /* wait for the other guy to finish (not efficient, but rare) */ + while (crc_table_empty) + ; + } + +#ifdef MAKECRCH + /* write out CRC tables to crc32.h */ + { + FILE *out; + + out = fopen("crc32.h", "w"); + if (out == NULL) return; + fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n"); + fprintf(out, " * Generated automatically by crc32.c\n */\n\n"); + fprintf(out, "local const z_crc_t FAR "); + fprintf(out, "crc_table[TBLS][256] =\n{\n {\n"); + write_table(out, crc_table[0]); +# ifdef BYFOUR + fprintf(out, "#ifdef BYFOUR\n"); + for (k = 1; k < 8; k++) { + fprintf(out, " },\n {\n"); + write_table(out, crc_table[k]); + } + fprintf(out, "#endif\n"); +# endif /* BYFOUR */ + fprintf(out, " }\n};\n"); + fclose(out); + } +#endif /* MAKECRCH */ +} + +#ifdef MAKECRCH +local void write_table(out, table) + FILE *out; + const z_crc_t FAR *table; +{ + int n; + + for (n = 0; n < 256; n++) + fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", + (unsigned long)(table[n]), + n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", ")); +} +#endif /* MAKECRCH */ + +#else /* !DYNAMIC_CRC_TABLE */ +/* ======================================================================== + * Tables of CRC-32s of all single-byte values, made by make_crc_table(). + */ +#include "crc32.h" +#endif /* DYNAMIC_CRC_TABLE */ + +/* ========================================================================= + * This function can be used by asm versions of crc32() + */ +const z_crc_t FAR * ZEXPORT get_crc_table() +{ +#ifdef DYNAMIC_CRC_TABLE + if (crc_table_empty) + make_crc_table(); +#endif /* DYNAMIC_CRC_TABLE */ + return (const z_crc_t FAR *)crc_table; +} + +/* ========================================================================= */ +#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8) +#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1 + +/* ========================================================================= */ +unsigned long ZEXPORT crc32_z(crc, buf, len) + unsigned long crc; + const unsigned char FAR *buf; + z_size_t len; +{ + if (buf == Z_NULL) return 0UL; + +#ifdef DYNAMIC_CRC_TABLE + if (crc_table_empty) + make_crc_table(); +#endif /* DYNAMIC_CRC_TABLE */ + +#ifdef BYFOUR + if (sizeof(void *) == sizeof(ptrdiff_t)) { + z_crc_t endian; + + endian = 1; + if (*((unsigned char *)(&endian))) + return crc32_little(crc, buf, len); + else + return crc32_big(crc, buf, len); + } +#endif /* BYFOUR */ + crc = crc ^ 0xffffffffUL; + while (len >= 8) { + DO8; + len -= 8; + } + if (len) do { + DO1; + } while (--len); + return crc ^ 0xffffffffUL; +} + +/* ========================================================================= */ +unsigned long ZEXPORT crc32(crc, buf, len) + unsigned long crc; + const unsigned char FAR *buf; + uInt len; +{ + return crc32_z(crc, buf, len); +} + +#ifdef BYFOUR + +/* + This BYFOUR code accesses the passed unsigned char * buffer with a 32-bit + integer pointer type. 
This violates the strict aliasing rule, where a + compiler can assume, for optimization purposes, that two pointers to + fundamentally different types won't ever point to the same memory. This can + manifest as a problem only if one of the pointers is written to. This code + only reads from those pointers. So long as this code remains isolated in + this compilation unit, there won't be a problem. For this reason, this code + should not be copied and pasted into a compilation unit in which other code + writes to the buffer that is passed to these routines. + */ + +/* ========================================================================= */ +#define DOLIT4 c ^= *buf4++; \ + c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ + crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] +#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 + +/* ========================================================================= */ +local unsigned long crc32_little(crc, buf, len) + unsigned long crc; + const unsigned char FAR *buf; + z_size_t len; +{ + register z_crc_t c; + register const z_crc_t FAR *buf4; + + c = (z_crc_t)crc; + c = ~c; + while (len && ((ptrdiff_t)buf & 3)) { + c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); + len--; + } + + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; + while (len >= 32) { + DOLIT32; + len -= 32; + } + while (len >= 4) { + DOLIT4; + len -= 4; + } + buf = (const unsigned char FAR *)buf4; + + if (len) do { + c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); + } while (--len); + c = ~c; + return (unsigned long)c; +} + +/* ========================================================================= */ +#define DOBIG4 c ^= *buf4++; \ + c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ + crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] +#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 + +/* ========================================================================= */ +local unsigned long crc32_big(crc, buf, len) + unsigned long crc; + const unsigned char FAR *buf; + z_size_t len; +{ + register z_crc_t c; + register const z_crc_t FAR *buf4; + + c = ZSWAP32((z_crc_t)crc); + c = ~c; + while (len && ((ptrdiff_t)buf & 3)) { + c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); + len--; + } + + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; + while (len >= 32) { + DOBIG32; + len -= 32; + } + while (len >= 4) { + DOBIG4; + len -= 4; + } + buf = (const unsigned char FAR *)buf4; + + if (len) do { + c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); + } while (--len); + c = ~c; + return (unsigned long)(ZSWAP32(c)); +} + +#endif /* BYFOUR */ + +#define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */ + +/* ========================================================================= */ +local unsigned long gf2_matrix_times(mat, vec) + unsigned long *mat; + unsigned long vec; +{ + unsigned long sum; + + sum = 0; + while (vec) { + if (vec & 1) + sum ^= *mat; + vec >>= 1; + mat++; + } + return sum; +} + +/* ========================================================================= */ +local void gf2_matrix_square(square, mat) + unsigned long *square; + unsigned long *mat; +{ + int n; + + for (n = 0; n < GF2_DIM; n++) + square[n] = gf2_matrix_times(mat, mat[n]); +} + +/* ========================================================================= */ +local uLong crc32_combine_(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off64_t len2; +{ + int n; + unsigned long row; + unsigned long 
even[GF2_DIM]; /* even-power-of-two zeros operator */ + unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */ + + /* degenerate case (also disallow negative lengths) */ + if (len2 <= 0) + return crc1; + + /* put operator for one zero bit in odd */ + odd[0] = 0xedb88320UL; /* CRC-32 polynomial */ + row = 1; + for (n = 1; n < GF2_DIM; n++) { + odd[n] = row; + row <<= 1; + } + + /* put operator for two zero bits in even */ + gf2_matrix_square(even, odd); + + /* put operator for four zero bits in odd */ + gf2_matrix_square(odd, even); + + /* apply len2 zeros to crc1 (first square will put the operator for one + zero byte, eight zero bits, in even) */ + do { + /* apply zeros operator for this bit of len2 */ + gf2_matrix_square(even, odd); + if (len2 & 1) + crc1 = gf2_matrix_times(even, crc1); + len2 >>= 1; + + /* if no more bits set, then done */ + if (len2 == 0) + break; + + /* another iteration of the loop with odd and even swapped */ + gf2_matrix_square(odd, even); + if (len2 & 1) + crc1 = gf2_matrix_times(odd, crc1); + len2 >>= 1; + + /* if no more bits set, then done */ + } while (len2 != 0); + + /* return combined crc */ + crc1 ^= crc2; + return crc1; +} + +/* ========================================================================= */ +uLong ZEXPORT crc32_combine(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} + +uLong ZEXPORT crc32_combine64(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off64_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} diff --git a/deps/zlib-1.2.11/src/crc32.h b/deps/zlib-1.2.11/src/crc32.h new file mode 100644 index 000000000000..9e0c77810251 --- /dev/null +++ b/deps/zlib-1.2.11/src/crc32.h @@ -0,0 +1,441 @@ +/* crc32.h -- tables for rapid CRC calculation + * Generated automatically by crc32.c + */ + +local const z_crc_t FAR crc_table[TBLS][256] = +{ + { + 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, + 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, + 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, + 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, + 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, + 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, + 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, + 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, + 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, + 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, + 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, + 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, + 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, + 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, + 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, + 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, + 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, + 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, + 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, + 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, + 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, + 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, + 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, 
+ 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, + 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, + 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, + 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, + 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, + 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, + 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, + 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, + 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, + 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, + 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, + 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, + 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, + 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, + 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, + 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, + 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, + 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, + 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, + 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, + 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, + 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, + 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, + 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, + 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, + 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, + 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, + 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, + 0x2d02ef8dUL +#ifdef BYFOUR + }, + { + 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL, + 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL, + 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL, + 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL, + 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL, + 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL, + 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL, + 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL, + 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL, + 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL, + 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL, + 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL, + 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL, + 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL, + 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL, + 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL, + 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL, + 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL, + 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL, + 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL, + 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 
0xad24e1afUL, + 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL, + 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL, + 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL, + 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL, + 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL, + 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL, + 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL, + 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL, + 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL, + 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL, + 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL, + 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL, + 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL, + 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL, + 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL, + 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL, + 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL, + 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL, + 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL, + 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL, + 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL, + 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL, + 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL, + 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL, + 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL, + 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL, + 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL, + 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL, + 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL, + 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL, + 0x9324fd72UL + }, + { + 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL, + 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL, + 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL, + 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL, + 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL, + 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL, + 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL, + 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL, + 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL, + 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL, + 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL, + 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL, + 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL, + 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL, + 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL, + 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL, + 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL, + 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL, + 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 
0x662203baUL, + 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL, + 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL, + 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL, + 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL, + 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL, + 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL, + 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL, + 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL, + 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL, + 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL, + 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL, + 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL, + 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL, + 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL, + 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL, + 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL, + 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL, + 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL, + 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL, + 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL, + 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL, + 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL, + 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL, + 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL, + 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL, + 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL, + 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL, + 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL, + 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL, + 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL, + 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL, + 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL, + 0xbe9834edUL + }, + { + 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL, + 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL, + 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL, + 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL, + 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL, + 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL, + 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL, + 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL, + 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL, + 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL, + 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL, + 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL, + 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL, + 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL, + 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL, + 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL, + 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 
0x446f98f5UL, + 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL, + 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL, + 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL, + 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL, + 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL, + 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL, + 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL, + 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL, + 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL, + 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL, + 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL, + 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL, + 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL, + 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL, + 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL, + 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL, + 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL, + 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL, + 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL, + 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL, + 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL, + 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL, + 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL, + 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL, + 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL, + 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL, + 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL, + 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL, + 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL, + 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL, + 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL, + 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL, + 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL, + 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL, + 0xde0506f1UL + }, + { + 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL, + 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL, + 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL, + 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL, + 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL, + 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL, + 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL, + 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL, + 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL, + 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL, + 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL, + 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL, + 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL, + 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL, + 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 
0x8ea80996UL, + 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL, + 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL, + 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL, + 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL, + 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL, + 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL, + 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL, + 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL, + 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL, + 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL, + 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL, + 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL, + 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL, + 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL, + 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL, + 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL, + 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL, + 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL, + 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL, + 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL, + 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL, + 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL, + 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL, + 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL, + 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL, + 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL, + 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL, + 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL, + 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL, + 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL, + 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL, + 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL, + 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL, + 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL, + 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL, + 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL, + 0x8def022dUL + }, + { + 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL, + 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL, + 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL, + 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL, + 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL, + 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL, + 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL, + 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL, + 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL, + 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL, + 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL, + 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL, + 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 
0x054f79f0UL, + 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL, + 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL, + 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL, + 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL, + 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL, + 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL, + 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL, + 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL, + 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL, + 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL, + 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL, + 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL, + 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL, + 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL, + 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL, + 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL, + 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL, + 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL, + 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL, + 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL, + 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL, + 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL, + 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL, + 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL, + 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL, + 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL, + 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL, + 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL, + 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL, + 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL, + 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL, + 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL, + 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL, + 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL, + 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL, + 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL, + 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL, + 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL, + 0x72fd2493UL + }, + { + 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL, + 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL, + 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL, + 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL, + 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL, + 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL, + 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL, + 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL, + 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL, + 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL, + 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 
0x2299e620UL, + 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL, + 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL, + 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL, + 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL, + 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL, + 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL, + 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL, + 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL, + 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL, + 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL, + 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL, + 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL, + 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL, + 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL, + 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL, + 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL, + 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL, + 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL, + 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL, + 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL, + 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL, + 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL, + 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL, + 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL, + 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL, + 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL, + 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL, + 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL, + 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL, + 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL, + 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL, + 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL, + 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL, + 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL, + 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL, + 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL, + 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL, + 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL, + 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL, + 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL, + 0xed3498beUL + }, + { + 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL, + 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL, + 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL, + 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL, + 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL, + 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL, + 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL, + 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL, + 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 
0x8610e4eaUL, + 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL, + 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL, + 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL, + 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL, + 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL, + 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL, + 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL, + 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL, + 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL, + 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL, + 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL, + 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL, + 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL, + 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL, + 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL, + 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL, + 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL, + 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL, + 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL, + 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL, + 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL, + 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL, + 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL, + 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL, + 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL, + 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL, + 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL, + 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL, + 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL, + 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL, + 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL, + 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL, + 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL, + 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL, + 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL, + 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL, + 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL, + 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL, + 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL, + 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL, + 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL, + 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL, + 0xf10605deUL +#endif + } +}; diff --git a/deps/zlib-1.2.11/src/deflate.c b/deps/zlib-1.2.11/src/deflate.c new file mode 100644 index 000000000000..1ec761448de9 --- /dev/null +++ b/deps/zlib-1.2.11/src/deflate.c @@ -0,0 +1,2163 @@ +/* deflate.c -- compress data using the deflation algorithm + * Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * ALGORITHM + * + * The "deflation" process depends on being able to 
identify portions + * of the input text which are identical to earlier input (within a + * sliding window trailing behind the input currently being processed). + * + * The most straightforward technique turns out to be the fastest for + * most input files: try all possible matches and select the longest. + * The key feature of this algorithm is that insertions into the string + * dictionary are very simple and thus fast, and deletions are avoided + * completely. Insertions are performed at each input character, whereas + * string matches are performed only when the previous match ends. So it + * is preferable to spend more time in matches to allow very fast string + * insertions and avoid deletions. The matching algorithm for small + * strings is inspired from that of Rabin & Karp. A brute force approach + * is used to find longer strings when a small match has been found. + * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze + * (by Leonid Broukhis). + * A previous version of this file used a more sophisticated algorithm + * (by Fiala and Greene) which is guaranteed to run in linear amortized + * time, but has a larger average cost, uses more memory and is patented. + * However the F&G algorithm may be faster for some highly redundant + * files if the parameter max_chain_length (described below) is too large. + * + * ACKNOWLEDGEMENTS + * + * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and + * I found it in 'freeze' written by Leonid Broukhis. + * Thanks to many people for bug reports and testing. + * + * REFERENCES + * + * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". + * Available in http://tools.ietf.org/html/rfc1951 + * + * A description of the Rabin and Karp algorithm is given in the book + * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. + * + * Fiala,E.R., and Greene,D.H. + * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 + * + */ + +/* @(#) $Id$ */ + +#include "deflate.h" + +const char deflate_copyright[] = + " deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. + */ + +/* =========================================================================== + * Function prototypes. + */ +typedef enum { + need_more, /* block not completed, need more input or more output */ + block_done, /* block flush performed */ + finish_started, /* finish started, need only more output at next deflate */ + finish_done /* finish done, accept no more input or output */ +} block_state; + +typedef block_state (*compress_func) OF((deflate_state *s, int flush)); +/* Compression function. Returns the block state after the call. 
*/ + +local int deflateStateCheck OF((z_streamp strm)); +local void slide_hash OF((deflate_state *s)); +local void fill_window OF((deflate_state *s)); +local block_state deflate_stored OF((deflate_state *s, int flush)); +local block_state deflate_fast OF((deflate_state *s, int flush)); +#ifndef FASTEST +local block_state deflate_slow OF((deflate_state *s, int flush)); +#endif +local block_state deflate_rle OF((deflate_state *s, int flush)); +local block_state deflate_huff OF((deflate_state *s, int flush)); +local void lm_init OF((deflate_state *s)); +local void putShortMSB OF((deflate_state *s, uInt b)); +local void flush_pending OF((z_streamp strm)); +local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); +#ifdef ASMV +# pragma message("Assembler code may have bugs -- use at your own risk") + void match_init OF((void)); /* asm code initialization */ + uInt longest_match OF((deflate_state *s, IPos cur_match)); +#else +local uInt longest_match OF((deflate_state *s, IPos cur_match)); +#endif + +#ifdef ZLIB_DEBUG +local void check_match OF((deflate_state *s, IPos start, IPos match, + int length)); +#endif + +/* =========================================================================== + * Local data + */ + +#define NIL 0 +/* Tail of hash chains */ + +#ifndef TOO_FAR +# define TOO_FAR 4096 +#endif +/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. + */ +typedef struct config_s { + ush good_length; /* reduce lazy search above this match length */ + ush max_lazy; /* do not perform lazy search above this match length */ + ush nice_length; /* quit search above this match length */ + ush max_chain; + compress_func func; +} config; + +#ifdef FASTEST +local const config configuration_table[2] = { +/* good lazy nice chain */ +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ +/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */ +#else +local const config configuration_table[10] = { +/* good lazy nice chain */ +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ +/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */ +/* 2 */ {4, 5, 16, 8, deflate_fast}, +/* 3 */ {4, 6, 32, 32, deflate_fast}, + +/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ +/* 5 */ {8, 16, 32, 32, deflate_slow}, +/* 6 */ {8, 16, 128, 128, deflate_slow}, +/* 7 */ {8, 32, 128, 256, deflate_slow}, +/* 8 */ {32, 128, 258, 1024, deflate_slow}, +/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */ +#endif + +/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 + * For deflate_fast() (levels <= 3) good is ignored and lazy has a different + * meaning. + */ + +/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */ +#define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0)) + +/* =========================================================================== + * Update a hash value with the given input byte + * IN assertion: all calls to UPDATE_HASH are made with consecutive input + * characters, so that a running hash key can be computed from the previous + * key instead of complete recalculation each time. 
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * If this file is compiled with -DFASTEST, the compression level is forced
+ * to 1, and no hash chains are maintained.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive input
+ * characters and the first MIN_MATCH bytes of str are valid (except for
+ * the last MIN_MATCH-1 bytes of the input file).
+ */
+#ifdef FASTEST
+#define INSERT_STRING(s, str, match_head) \
+   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+    match_head = s->head[s->ins_h], \
+    s->head[s->ins_h] = (Pos)(str))
+#else
+#define INSERT_STRING(s, str, match_head) \
+   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+    match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
+    s->head[s->ins_h] = (Pos)(str))
+#endif
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+    s->head[s->hash_size-1] = NIL; \
+    zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ===========================================================================
+ * Slide the hash table when sliding the window down (could be avoided with 32
+ * bit values at the expense of memory usage). We slide even when level == 0 to
+ * keep the hash table consistent if we switch back to level > 0 later.
+ */
+local void slide_hash(s)
+    deflate_state *s;
+{
+    unsigned n, m;
+    Posf *p;
+    uInt wsize = s->w_size;
+
+    n = s->hash_size;
+    p = &s->head[n];
+    do {
+        m = *--p;
+        *p = (Pos)(m >= wsize ? m - wsize : NIL);
+    } while (--n);
+    n = wsize;
+#ifndef FASTEST
+    p = &s->prev[n];
+    do {
+        m = *--p;
+        *p = (Pos)(m >= wsize ? m - wsize : NIL);
+        /* If n is not on any hash chain, prev[n] is garbage but
+         * its value will never be used.
+         */
+    } while (--n);
+#endif
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateInit_(strm, level, version, stream_size)
+    z_streamp strm;
+    int level;
+    const char *version;
+    int stream_size;
+{
+    return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
+                         Z_DEFAULT_STRATEGY, version, stream_size);
+    /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+                  version, stream_size)
+    z_streamp strm;
+    int level;
+    int method;
+    int windowBits;
+    int memLevel;
+    int strategy;
+    const char *version;
+    int stream_size;
+{
+    deflate_state *s;
+    int wrap = 1;
+    static const char my_version[] = ZLIB_VERSION;
+
+    ushf *overlay;
+    /* We overlay pending_buf and d_buf+l_buf. This works since the average
+     * output size for (length,distance) codes is <= 24 bits.
+ */ + + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; + } + if (strm == Z_NULL) return Z_STREAM_ERROR; + + strm->msg = Z_NULL; + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + +#ifdef FASTEST + if (level != 0) level = 1; +#else + if (level == Z_DEFAULT_COMPRESSION) level = 6; +#endif + + if (windowBits < 0) { /* suppress zlib wrapper */ + wrap = 0; + windowBits = -windowBits; + } +#ifdef GZIP + else if (windowBits > 15) { + wrap = 2; /* write gzip wrapper instead */ + windowBits -= 16; + } +#endif + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) { + return Z_STREAM_ERROR; + } + if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ + s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); + if (s == Z_NULL) return Z_MEM_ERROR; + strm->state = (struct internal_state FAR *)s; + s->strm = strm; + s->status = INIT_STATE; /* to pass state test in deflateReset() */ + + s->wrap = wrap; + s->gzhead = Z_NULL; + s->w_bits = (uInt)windowBits; + s->w_size = 1 << s->w_bits; + s->w_mask = s->w_size - 1; + + s->hash_bits = (uInt)memLevel + 7; + s->hash_size = 1 << s->hash_bits; + s->hash_mask = s->hash_size - 1; + s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); + + s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + + s->high_water = 0; /* nothing written to s->window yet */ + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + + overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); + s->pending_buf = (uchf *) overlay; + s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { + s->status = FINISH_STATE; + strm->msg = ERR_MSG(Z_MEM_ERROR); + deflateEnd (strm); + return Z_MEM_ERROR; + } + s->d_buf = overlay + s->lit_bufsize/sizeof(ush); + s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; + + s->level = level; + s->strategy = strategy; + s->method = (Byte)method; + + return deflateReset(strm); +} + +/* ========================================================================= + * Check for a valid deflate stream state. Return 0 if ok, 1 if not. 
+ */ +local int deflateStateCheck (strm) + z_streamp strm; +{ + deflate_state *s; + if (strm == Z_NULL || + strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) + return 1; + s = strm->state; + if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE && +#ifdef GZIP + s->status != GZIP_STATE && +#endif + s->status != EXTRA_STATE && + s->status != NAME_STATE && + s->status != COMMENT_STATE && + s->status != HCRC_STATE && + s->status != BUSY_STATE && + s->status != FINISH_STATE)) + return 1; + return 0; +} + +/* ========================================================================= */ +int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) + z_streamp strm; + const Bytef *dictionary; + uInt dictLength; +{ + deflate_state *s; + uInt str, n; + int wrap; + unsigned avail; + z_const unsigned char *next; + + if (deflateStateCheck(strm) || dictionary == Z_NULL) + return Z_STREAM_ERROR; + s = strm->state; + wrap = s->wrap; + if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead) + return Z_STREAM_ERROR; + + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap == 1) + strm->adler = adler32(strm->adler, dictionary, dictLength); + s->wrap = 0; /* avoid computing Adler-32 in read_buf */ + + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s->w_size) { + if (wrap == 0) { /* already empty otherwise */ + CLEAR_HASH(s); + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + dictionary += dictLength - s->w_size; /* use the tail */ + dictLength = s->w_size; + } + + /* insert dictionary into window and hash */ + avail = strm->avail_in; + next = strm->next_in; + strm->avail_in = dictLength; + strm->next_in = (z_const Bytef *)dictionary; + fill_window(s); + while (s->lookahead >= MIN_MATCH) { + str = s->strstart; + n = s->lookahead - (MIN_MATCH-1); + do { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + } while (--n); + s->strstart = str; + s->lookahead = MIN_MATCH-1; + fill_window(s); + } + s->strstart += s->lookahead; + s->block_start = (long)s->strstart; + s->insert = s->lookahead; + s->lookahead = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + strm->next_in = next; + strm->avail_in = avail; + s->wrap = wrap; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength) + z_streamp strm; + Bytef *dictionary; + uInt *dictLength; +{ + deflate_state *s; + uInt len; + + if (deflateStateCheck(strm)) + return Z_STREAM_ERROR; + s = strm->state; + len = s->strstart + s->lookahead; + if (len > s->w_size) + len = s->w_size; + if (dictionary != Z_NULL && len) + zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len); + if (dictLength != Z_NULL) + *dictLength = len; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateResetKeep (strm) + z_streamp strm; +{ + deflate_state *s; + + if (deflateStateCheck(strm)) { + return Z_STREAM_ERROR; + } + + strm->total_in = strm->total_out = 0; + strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ + strm->data_type = Z_UNKNOWN; + + s = (deflate_state *)strm->state; + s->pending = 0; + s->pending_out = s->pending_buf; + + if (s->wrap < 0) { + s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); 
*/ + } + s->status = +#ifdef GZIP + s->wrap == 2 ? GZIP_STATE : +#endif + s->wrap ? INIT_STATE : BUSY_STATE; + strm->adler = +#ifdef GZIP + s->wrap == 2 ? crc32(0L, Z_NULL, 0) : +#endif + adler32(0L, Z_NULL, 0); + s->last_flush = Z_NO_FLUSH; + + _tr_init(s); + + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateReset (strm) + z_streamp strm; +{ + int ret; + + ret = deflateResetKeep(strm); + if (ret == Z_OK) + lm_init(strm->state); + return ret; +} + +/* ========================================================================= */ +int ZEXPORT deflateSetHeader (strm, head) + z_streamp strm; + gz_headerp head; +{ + if (deflateStateCheck(strm) || strm->state->wrap != 2) + return Z_STREAM_ERROR; + strm->state->gzhead = head; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflatePending (strm, pending, bits) + unsigned *pending; + int *bits; + z_streamp strm; +{ + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + if (pending != Z_NULL) + *pending = strm->state->pending; + if (bits != Z_NULL) + *bits = strm->state->bi_valid; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflatePrime (strm, bits, value) + z_streamp strm; + int bits; + int value; +{ + deflate_state *s; + int put; + + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + s = strm->state; + if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; + if (put > bits) + put = bits; + s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid); + s->bi_valid += put; + _tr_flush_bits(s); + value >>= put; + bits -= put; + } while (bits); + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateParams(strm, level, strategy) + z_streamp strm; + int level; + int strategy; +{ + deflate_state *s; + compress_func func; + + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + s = strm->state; + +#ifdef FASTEST + if (level != 0) level = 1; +#else + if (level == Z_DEFAULT_COMPRESSION) level = 6; +#endif + if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { + return Z_STREAM_ERROR; + } + func = configuration_table[s->level].func; + + if ((strategy != s->strategy || func != configuration_table[level].func) && + s->high_water) { + /* Flush the last buffer: */ + int err = deflate(strm, Z_BLOCK); + if (err == Z_STREAM_ERROR) + return err; + if (strm->avail_out == 0) + return Z_BUF_ERROR; + } + if (s->level != level) { + if (s->level == 0 && s->matches != 0) { + if (s->matches == 1) + slide_hash(s); + else + CLEAR_HASH(s); + s->matches = 0; + } + s->level = level; + s->max_lazy_match = configuration_table[level].max_lazy; + s->good_match = configuration_table[level].good_length; + s->nice_match = configuration_table[level].nice_length; + s->max_chain_length = configuration_table[level].max_chain; + } + s->strategy = strategy; + return Z_OK; +} + +/* ========================================================================= */ +int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) + z_streamp strm; + int good_length; + int max_lazy; + int nice_length; + int max_chain; +{ + deflate_state *s; + + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + s = strm->state; + s->good_match = (uInt)good_length; + s->max_lazy_match = (uInt)max_lazy; + s->nice_match = nice_length; + 
s->max_chain_length = (uInt)max_chain; + return Z_OK; +} + +/* ========================================================================= + * For the default windowBits of 15 and memLevel of 8, this function returns + * a close to exact, as well as small, upper bound on the compressed size. + * They are coded as constants here for a reason--if the #define's are + * changed, then this function needs to be changed as well. The return + * value for 15 and 8 only works for those exact settings. + * + * For any setting other than those defaults for windowBits and memLevel, + * the value returned is a conservative worst case for the maximum expansion + * resulting from using fixed blocks instead of stored blocks, which deflate + * can emit on compressed data for some combinations of the parameters. + * + * This function could be more sophisticated to provide closer upper bounds for + * every combination of windowBits and memLevel. But even the conservative + * upper bound of about 14% expansion does not seem onerous for output buffer + * allocation. + */ +uLong ZEXPORT deflateBound(strm, sourceLen) + z_streamp strm; + uLong sourceLen; +{ + deflate_state *s; + uLong complen, wraplen; + + /* conservative upper bound for compressed data */ + complen = sourceLen + + ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; + + /* if can't get parameters, return conservative bound plus zlib wrapper */ + if (deflateStateCheck(strm)) + return complen + 6; + + /* compute wrapper length */ + s = strm->state; + switch (s->wrap) { + case 0: /* raw deflate */ + wraplen = 0; + break; + case 1: /* zlib wrapper */ + wraplen = 6 + (s->strstart ? 4 : 0); + break; +#ifdef GZIP + case 2: /* gzip wrapper */ + wraplen = 18; + if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ + Bytef *str; + if (s->gzhead->extra != Z_NULL) + wraplen += 2 + s->gzhead->extra_len; + str = s->gzhead->name; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + str = s->gzhead->comment; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + if (s->gzhead->hcrc) + wraplen += 2; + } + break; +#endif + default: /* for compiler happiness */ + wraplen = 6; + } + + /* if not default parameters, return conservative bound */ + if (s->w_bits != 15 || s->hash_bits != 8 + 7) + return complen + wraplen; + + /* default settings: return tight bound for that case */ + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13 - 6 + wraplen; +} + +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +local void putShortMSB (s, b) + deflate_state *s; + uInt b; +{ + put_byte(s, (Byte)(b >> 8)); + put_byte(s, (Byte)(b & 0xff)); +} + +/* ========================================================================= + * Flush as much pending output as possible. All deflate() output, except for + * some deflate_stored() output, goes through this function so some + * applications may wish to modify it to avoid allocating a large + * strm->next_out buffer and copying into it. (See also read_buf()). 
+ */ +local void flush_pending(strm) + z_streamp strm; +{ + unsigned len; + deflate_state *s = strm->state; + + _tr_flush_bits(s); + len = s->pending; + if (len > strm->avail_out) len = strm->avail_out; + if (len == 0) return; + + zmemcpy(strm->next_out, s->pending_out, len); + strm->next_out += len; + s->pending_out += len; + strm->total_out += len; + strm->avail_out -= len; + s->pending -= len; + if (s->pending == 0) { + s->pending_out = s->pending_buf; + } +} + +/* =========================================================================== + * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1]. + */ +#define HCRC_UPDATE(beg) \ + do { \ + if (s->gzhead->hcrc && s->pending > (beg)) \ + strm->adler = crc32(strm->adler, s->pending_buf + (beg), \ + s->pending - (beg)); \ + } while (0) + +/* ========================================================================= */ +int ZEXPORT deflate (strm, flush) + z_streamp strm; + int flush; +{ + int old_flush; /* value of flush param for previous deflate call */ + deflate_state *s; + + if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) { + return Z_STREAM_ERROR; + } + s = strm->state; + + if (strm->next_out == Z_NULL || + (strm->avail_in != 0 && strm->next_in == Z_NULL) || + (s->status == FINISH_STATE && flush != Z_FINISH)) { + ERR_RETURN(strm, Z_STREAM_ERROR); + } + if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); + + old_flush = s->last_flush; + s->last_flush = flush; + + /* Flush as much pending output as possible */ + if (s->pending != 0) { + flush_pending(strm); + if (strm->avail_out == 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s->last_flush = -1; + return Z_OK; + } + + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUF_ERROR. 
+ */ + } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) && + flush != Z_FINISH) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* User must not provide more input after the first FINISH: */ + if (s->status == FINISH_STATE && strm->avail_in != 0) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* Write the header */ + if (s->status == INIT_STATE) { + /* zlib header */ + uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; + uInt level_flags; + + if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) + level_flags = 0; + else if (s->level < 6) + level_flags = 1; + else if (s->level == 6) + level_flags = 2; + else + level_flags = 3; + header |= (level_flags << 6); + if (s->strstart != 0) header |= PRESET_DICT; + header += 31 - (header % 31); + + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s->strstart != 0) { + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + } + strm->adler = adler32(0L, Z_NULL, 0); + s->status = BUSY_STATE; + + /* Compression must start with an empty pending buffer */ + flush_pending(strm); + if (s->pending != 0) { + s->last_flush = -1; + return Z_OK; + } + } +#ifdef GZIP + if (s->status == GZIP_STATE) { + /* gzip header */ + strm->adler = crc32(0L, Z_NULL, 0); + put_byte(s, 31); + put_byte(s, 139); + put_byte(s, 8); + if (s->gzhead == Z_NULL) { + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, s->level == 9 ? 2 : + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? + 4 : 0)); + put_byte(s, OS_CODE); + s->status = BUSY_STATE; + + /* Compression must start with an empty pending buffer */ + flush_pending(strm); + if (s->pending != 0) { + s->last_flush = -1; + return Z_OK; + } + } + else { + put_byte(s, (s->gzhead->text ? 1 : 0) + + (s->gzhead->hcrc ? 2 : 0) + + (s->gzhead->extra == Z_NULL ? 0 : 4) + + (s->gzhead->name == Z_NULL ? 0 : 8) + + (s->gzhead->comment == Z_NULL ? 0 : 16) + ); + put_byte(s, (Byte)(s->gzhead->time & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); + put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); + put_byte(s, s->level == 9 ? 2 : + (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 
+ 4 : 0)); + put_byte(s, s->gzhead->os & 0xff); + if (s->gzhead->extra != Z_NULL) { + put_byte(s, s->gzhead->extra_len & 0xff); + put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); + } + if (s->gzhead->hcrc) + strm->adler = crc32(strm->adler, s->pending_buf, + s->pending); + s->gzindex = 0; + s->status = EXTRA_STATE; + } + } + if (s->status == EXTRA_STATE) { + if (s->gzhead->extra != Z_NULL) { + ulg beg = s->pending; /* start of bytes to update crc */ + uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex; + while (s->pending + left > s->pending_buf_size) { + uInt copy = s->pending_buf_size - s->pending; + zmemcpy(s->pending_buf + s->pending, + s->gzhead->extra + s->gzindex, copy); + s->pending = s->pending_buf_size; + HCRC_UPDATE(beg); + s->gzindex += copy; + flush_pending(strm); + if (s->pending != 0) { + s->last_flush = -1; + return Z_OK; + } + beg = 0; + left -= copy; + } + zmemcpy(s->pending_buf + s->pending, + s->gzhead->extra + s->gzindex, left); + s->pending += left; + HCRC_UPDATE(beg); + s->gzindex = 0; + } + s->status = NAME_STATE; + } + if (s->status == NAME_STATE) { + if (s->gzhead->name != Z_NULL) { + ulg beg = s->pending; /* start of bytes to update crc */ + int val; + do { + if (s->pending == s->pending_buf_size) { + HCRC_UPDATE(beg); + flush_pending(strm); + if (s->pending != 0) { + s->last_flush = -1; + return Z_OK; + } + beg = 0; + } + val = s->gzhead->name[s->gzindex++]; + put_byte(s, val); + } while (val != 0); + HCRC_UPDATE(beg); + s->gzindex = 0; + } + s->status = COMMENT_STATE; + } + if (s->status == COMMENT_STATE) { + if (s->gzhead->comment != Z_NULL) { + ulg beg = s->pending; /* start of bytes to update crc */ + int val; + do { + if (s->pending == s->pending_buf_size) { + HCRC_UPDATE(beg); + flush_pending(strm); + if (s->pending != 0) { + s->last_flush = -1; + return Z_OK; + } + beg = 0; + } + val = s->gzhead->comment[s->gzindex++]; + put_byte(s, val); + } while (val != 0); + HCRC_UPDATE(beg); + } + s->status = HCRC_STATE; + } + if (s->status == HCRC_STATE) { + if (s->gzhead->hcrc) { + if (s->pending + 2 > s->pending_buf_size) { + flush_pending(strm); + if (s->pending != 0) { + s->last_flush = -1; + return Z_OK; + } + } + put_byte(s, (Byte)(strm->adler & 0xff)); + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); + strm->adler = crc32(0L, Z_NULL, 0); + } + s->status = BUSY_STATE; + + /* Compression must start with an empty pending buffer */ + flush_pending(strm); + if (s->pending != 0) { + s->last_flush = -1; + return Z_OK; + } + } +#endif + + /* Start a new block or continue the current one. + */ + if (strm->avail_in != 0 || s->lookahead != 0 || + (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { + block_state bstate; + + bstate = s->level == 0 ? deflate_stored(s, flush) : + s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : + s->strategy == Z_RLE ? deflate_rle(s, flush) : + (*(configuration_table[s->level].func))(s, flush); + + if (bstate == finish_started || bstate == finish_done) { + s->status = FINISH_STATE; + } + if (bstate == need_more || bstate == finish_started) { + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ + } + return Z_OK; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. 
+ */ + } + if (bstate == block_done) { + if (flush == Z_PARTIAL_FLUSH) { + _tr_align(s); + } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ + _tr_stored_block(s, (char*)0, 0L, 0); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). + */ + if (flush == Z_FULL_FLUSH) { + CLEAR_HASH(s); /* forget history */ + if (s->lookahead == 0) { + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + } + } + flush_pending(strm); + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK; + } + } + } + + if (flush != Z_FINISH) return Z_OK; + if (s->wrap <= 0) return Z_STREAM_END; + + /* Write the trailer */ +#ifdef GZIP + if (s->wrap == 2) { + put_byte(s, (Byte)(strm->adler & 0xff)); + put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); + put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); + put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); + put_byte(s, (Byte)(strm->total_in & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); + put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); + } + else +#endif + { + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + } + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ + return s->pending != 0 ? Z_OK : Z_STREAM_END; +} + +/* ========================================================================= */ +int ZEXPORT deflateEnd (strm) + z_streamp strm; +{ + int status; + + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + + status = strm->state->status; + + /* Deallocate in reverse order of allocations: */ + TRY_FREE(strm, strm->state->pending_buf); + TRY_FREE(strm, strm->state->head); + TRY_FREE(strm, strm->state->prev); + TRY_FREE(strm, strm->state->window); + + ZFREE(strm, strm->state); + strm->state = Z_NULL; + + return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; +} + +/* ========================================================================= + * Copy the source state to the destination state. + * To simplify the source, this is not supported for 16-bit MSDOS (which + * doesn't have enough memory anyway to duplicate compression states). 
+ */ +int ZEXPORT deflateCopy (dest, source) + z_streamp dest; + z_streamp source; +{ +#ifdef MAXSEG_64K + return Z_STREAM_ERROR; +#else + deflate_state *ds; + deflate_state *ss; + ushf *overlay; + + + if (deflateStateCheck(source) || dest == Z_NULL) { + return Z_STREAM_ERROR; + } + + ss = source->state; + + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); + + ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); + if (ds == Z_NULL) return Z_MEM_ERROR; + dest->state = (struct internal_state FAR *) ds; + zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state)); + ds->strm = dest; + + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); + overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); + ds->pending_buf = (uchf *) overlay; + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { + deflateEnd (dest); + return Z_MEM_ERROR; + } + /* following zmemcpy do not work for 16-bit MSDOS */ + zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); + zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos)); + zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos)); + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); + ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); + ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; + ds->bl_desc.dyn_tree = ds->bl_tree; + + return Z_OK; +#endif /* MAXSEG_64K */ +} + +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->next_in buffer and copying from it. + * (See also flush_pending()). 
+ */ +local unsigned read_buf(strm, buf, size) + z_streamp strm; + Bytef *buf; + unsigned size; +{ + unsigned len = strm->avail_in; + + if (len > size) len = size; + if (len == 0) return 0; + + strm->avail_in -= len; + + zmemcpy(buf, strm->next_in, len); + if (strm->state->wrap == 1) { + strm->adler = adler32(strm->adler, buf, len); + } +#ifdef GZIP + else if (strm->state->wrap == 2) { + strm->adler = crc32(strm->adler, buf, len); + } +#endif + strm->next_in += len; + strm->total_in += len; + + return len; +} + +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +local void lm_init (s) + deflate_state *s; +{ + s->window_size = (ulg)2L*s->w_size; + + CLEAR_HASH(s); + + /* Set the default configuration parameters: + */ + s->max_lazy_match = configuration_table[s->level].max_lazy; + s->good_match = configuration_table[s->level].good_length; + s->nice_match = configuration_table[s->level].nice_length; + s->max_chain_length = configuration_table[s->level].max_chain; + + s->strstart = 0; + s->block_start = 0L; + s->lookahead = 0; + s->insert = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + s->ins_h = 0; +#ifndef FASTEST +#ifdef ASMV + match_init(); /* initialize the asm code */ +#endif +#endif +} + +#ifndef FASTEST +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. + */ +#ifndef ASMV +/* For 80x86 and 680x0, an optimized version will be provided in match.asm or + * match.S. The code will be functionally equivalent. + */ +local uInt longest_match(s, cur_match) + deflate_state *s; + IPos cur_match; /* current match */ +{ + unsigned chain_length = s->max_chain_length;/* max hash chain length */ + register Bytef *scan = s->window + s->strstart; /* current string */ + register Bytef *match; /* matched string */ + register int len; /* length of current match */ + int best_len = (int)s->prev_length; /* best match length so far */ + int nice_match = s->nice_match; /* stop if match long enough */ + IPos limit = s->strstart > (IPos)MAX_DIST(s) ? + s->strstart - (IPos)MAX_DIST(s) : NIL; + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + Posf *prev = s->prev; + uInt wmask = s->w_mask; + +#ifdef UNALIGNED_OK + /* Compare two bytes at a time. Note: this is not always beneficial. + * Try with and without -DUNALIGNED_OK to check. + */ + register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; + register ush scan_start = *(ushf*)scan; + register ush scan_end = *(ushf*)(scan+best_len-1); +#else + register Bytef *strend = s->window + s->strstart + MAX_MATCH; + register Byte scan_end1 = scan[best_len-1]; + register Byte scan_end = scan[best_len]; +#endif + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. 
+ */ + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + /* Do not waste too much time if we already have a good match: */ + if (s->prev_length >= s->good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead; + + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + do { + Assert(cur_match < s->strstart, "no future"); + match = s->window + cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2. Note that the checks below + * for insufficient lookahead only occur occasionally for performance + * reasons. Therefore uninitialized memory will be accessed, and + * conditional jumps will be made that depend on those values. + * However the length of the match is limited to the lookahead, so + * the output of deflate is not affected by the uninitialized values. + */ +#if (defined(UNALIGNED_OK) && MAX_MATCH == 258) + /* This code assumes sizeof(unsigned short) == 2. Do not use + * UNALIGNED_OK if your compiler uses a different size. + */ + if (*(ushf*)(match+best_len-1) != scan_end || + *(ushf*)match != scan_start) continue; + + /* It is not necessary to compare scan[2] and match[2] since they are + * always equal when the other bytes match, given that the hash keys + * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at + * strstart+3, +5, ... up to strstart+257. We check for insufficient + * lookahead only every 4th comparison; the 128th check will be made + * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is + * necessary to put more guard bytes at the end of the window, or + * to check more often for insufficient lookahead. + */ + Assert(scan[2] == match[2], "scan[2]?"); + scan++, match++; + do { + } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + scan < strend); + /* The funny "do {}" generates better code on most compilers */ + + /* Here, scan <= window+strstart+257 */ + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + if (*scan == *match) scan++; + + len = (MAX_MATCH - 1) - (int)(strend-scan); + scan = strend - (MAX_MATCH-1); + +#else /* UNALIGNED_OK */ + + if (match[best_len] != scan_end || + match[best_len-1] != scan_end1 || + *match != *scan || + *++match != scan[1]) continue; + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match++; + Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. 
+ */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + scan < strend); + + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (int)(strend - scan); + scan = strend - MAX_MATCH; + +#endif /* UNALIGNED_OK */ + + if (len > best_len) { + s->match_start = cur_match; + best_len = len; + if (len >= nice_match) break; +#ifdef UNALIGNED_OK + scan_end = *(ushf*)(scan+best_len-1); +#else + scan_end1 = scan[best_len-1]; + scan_end = scan[best_len]; +#endif + } + } while ((cur_match = prev[cur_match & wmask]) > limit + && --chain_length != 0); + + if ((uInt)best_len <= s->lookahead) return (uInt)best_len; + return s->lookahead; +} +#endif /* ASMV */ + +#else /* FASTEST */ + +/* --------------------------------------------------------------------------- + * Optimized version for FASTEST only + */ +local uInt longest_match(s, cur_match) + deflate_state *s; + IPos cur_match; /* current match */ +{ + register Bytef *scan = s->window + s->strstart; /* current string */ + register Bytef *match; /* matched string */ + register int len; /* length of current match */ + register Bytef *strend = s->window + s->strstart + MAX_MATCH; + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. + */ + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + Assert(cur_match < s->strstart, "no future"); + + match = s->window + cur_match; + + /* Return failure if the match length is less than 2: + */ + if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match += 2; + Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. + */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + scan < strend); + + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (int)(strend - scan); + + if (len < MIN_MATCH) return MIN_MATCH - 1; + + s->match_start = cur_match; + return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; +} + +#endif /* FASTEST */ + +#ifdef ZLIB_DEBUG + +#define EQUAL 0 +/* result of memcmp for equal strings */ + +/* =========================================================================== + * Check that the match at match_start is indeed a match. 
+ */ +local void check_match(s, start, match, length) + deflate_state *s; + IPos start, match; + int length; +{ + /* check that the match is indeed a match */ + if (zmemcmp(s->window + match, + s->window + start, length) != EQUAL) { + fprintf(stderr, " start %u, match %u, length %d\n", + start, match, length); + do { + fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); + } while (--length != 0); + z_error("invalid match"); + } + if (z_verbose > 1) { + fprintf(stderr,"\\[%d,%d]", start-match, length); + do { putc(s->window[start++], stderr); } while (--length != 0); + } +} +#else +# define check_match(s, start, match, length) +#endif /* ZLIB_DEBUG */ + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. + * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). + */ +local void fill_window(s) + deflate_state *s; +{ + unsigned n; + unsigned more; /* Amount of free space at the end of the window. */ + uInt wsize = s->w_size; + + Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + + do { + more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); + + /* Deal with !@#$% 64K limit: */ + if (sizeof(int) <= 2) { + if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + more = wsize; + + } else if (more == (unsigned)(-1)) { + /* Very unlikely, but possible on 16 bit machine if + * strstart == 0 && lookahead == 1 (input done a byte at time) + */ + more--; + } + } + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (s->strstart >= wsize+MAX_DIST(s)) { + + zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more); + s->match_start -= wsize; + s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ + s->block_start -= (long) wsize; + slide_hash(s); + more += wsize; + } + if (s->strm->avail_in == 0) break; + + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. 
+ */ + Assert(more >= 2, "more < 2"); + + n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); + s->lookahead += n; + + /* Initialize the hash value now that we have some input: */ + if (s->lookahead + s->insert >= MIN_MATCH) { + uInt str = s->strstart - s->insert; + s->ins_h = s->window[str]; + UPDATE_HASH(s, s->ins_h, s->window[str + 1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + while (s->insert) { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + s->insert--; + if (s->lookahead + s->insert < MIN_MATCH) + break; + } + } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. + */ + + } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ + if (s->high_water < s->window_size) { + ulg curr = s->strstart + (ulg)(s->lookahead); + ulg init; + + if (s->high_water < curr) { + /* Previous high water mark below current data -- zero WIN_INIT + * bytes or up to end of window, whichever is less. + */ + init = s->window_size - curr; + if (init > WIN_INIT) + init = WIN_INIT; + zmemzero(s->window + curr, (unsigned)init); + s->high_water = curr + init; + } + else if (s->high_water < (ulg)curr + WIN_INIT) { + /* High water mark at or above current data, but below current data + * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up + * to end of window, whichever is less. + */ + init = (ulg)curr + WIN_INIT - s->high_water; + if (init > s->window_size - s->high_water) + init = s->window_size - s->high_water; + zmemzero(s->window + s->high_water, (unsigned)init); + s->high_water += init; + } + } + + Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, + "not enough room for search"); +} + +/* =========================================================================== + * Flush the current block, with given end-of-file flag. + * IN assertion: strstart is set to the end of the current match. + */ +#define FLUSH_BLOCK_ONLY(s, last) { \ + _tr_flush_block(s, (s->block_start >= 0L ? \ + (charf *)&s->window[(unsigned)s->block_start] : \ + (charf *)Z_NULL), \ + (ulg)((long)s->strstart - s->block_start), \ + (last)); \ + s->block_start = s->strstart; \ + flush_pending(s->strm); \ + Tracev((stderr,"[FLUSH]")); \ +} + +/* Same but force premature exit if necessary. */ +#define FLUSH_BLOCK(s, last) { \ + FLUSH_BLOCK_ONLY(s, last); \ + if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \ +} + +/* Maximum stored block length in deflate format (not including header). */ +#define MAX_STORED 65535 + +/* Minimum of a and b. */ +#define MIN(a, b) ((a) > (b) ? (b) : (a)) + +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. 
+ * + * In case deflateParams() is used to later switch to a non-zero compression + * level, s->matches (otherwise unused when storing) keeps track of the number + * of hash table slides to perform. If s->matches is 1, then one hash table + * slide will be done when switching. If s->matches is 2, the maximum value + * allowed here, then the hash table will be cleared, since two or more slides + * is the same as a clear. + * + * deflate_stored() is written to minimize the number of times an input byte is + * copied. It is most efficient with large input and output buffers, which + * maximizes the opportunites to have a single copy from next_in to next_out. + */ +local block_state deflate_stored(s, flush) + deflate_state *s; + int flush; +{ + /* Smallest worthy block size when not flushing or finishing. By default + * this is 32K. This can be as small as 507 bytes for memLevel == 1. For + * large input and output buffers, the stored block size will be larger. + */ + unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size); + + /* Copy as many min_block or larger stored blocks directly to next_out as + * possible. If flushing, copy the remaining available input to next_out as + * stored blocks, if there is enough space. + */ + unsigned len, left, have, last = 0; + unsigned used = s->strm->avail_in; + do { + /* Set len to the maximum size block that we can copy directly with the + * available input data and output space. Set left to how much of that + * would be copied from what's left in the window. + */ + len = MAX_STORED; /* maximum deflate stored block length */ + have = (s->bi_valid + 42) >> 3; /* number of header bytes */ + if (s->strm->avail_out < have) /* need room for header */ + break; + /* maximum stored block length that will fit in avail_out: */ + have = s->strm->avail_out - have; + left = s->strstart - s->block_start; /* bytes left in window */ + if (len > (ulg)left + s->strm->avail_in) + len = left + s->strm->avail_in; /* limit len to the input */ + if (len > have) + len = have; /* limit len to the output */ + + /* If the stored block would be less than min_block in length, or if + * unable to copy all of the available input when flushing, then try + * copying to the window and the pending buffer instead. Also don't + * write an empty block when flushing -- deflate() does that. + */ + if (len < min_block && ((len == 0 && flush != Z_FINISH) || + flush == Z_NO_FLUSH || + len != left + s->strm->avail_in)) + break; + + /* Make a dummy stored block in pending to get the header bytes, + * including any pending bits. This also updates the debugging counts. + */ + last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0; + _tr_stored_block(s, (char *)0, 0L, last); + + /* Replace the lengths in the dummy stored block with len. */ + s->pending_buf[s->pending - 4] = len; + s->pending_buf[s->pending - 3] = len >> 8; + s->pending_buf[s->pending - 2] = ~len; + s->pending_buf[s->pending - 1] = ~len >> 8; + + /* Write the stored block header bytes. */ + flush_pending(s->strm); + +#ifdef ZLIB_DEBUG + /* Update debugging counts for the data about to be copied. */ + s->compressed_len += len << 3; + s->bits_sent += len << 3; +#endif + + /* Copy uncompressed bytes from the window to next_out. 
*/ + if (left) { + if (left > len) + left = len; + zmemcpy(s->strm->next_out, s->window + s->block_start, left); + s->strm->next_out += left; + s->strm->avail_out -= left; + s->strm->total_out += left; + s->block_start += left; + len -= left; + } + + /* Copy uncompressed bytes directly from next_in to next_out, updating + * the check value. + */ + if (len) { + read_buf(s->strm, s->strm->next_out, len); + s->strm->next_out += len; + s->strm->avail_out -= len; + s->strm->total_out += len; + } + } while (last == 0); + + /* Update the sliding window with the last s->w_size bytes of the copied + * data, or append all of the copied data to the existing window if less + * than s->w_size bytes were copied. Also update the number of bytes to + * insert in the hash tables, in the event that deflateParams() switches to + * a non-zero compression level. + */ + used -= s->strm->avail_in; /* number of input bytes directly copied */ + if (used) { + /* If any input was used, then no unused input remains in the window, + * therefore s->block_start == s->strstart. + */ + if (used >= s->w_size) { /* supplant the previous history */ + s->matches = 2; /* clear hash */ + zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size); + s->strstart = s->w_size; + } + else { + if (s->window_size - s->strstart <= used) { + /* Slide the window down. */ + s->strstart -= s->w_size; + zmemcpy(s->window, s->window + s->w_size, s->strstart); + if (s->matches < 2) + s->matches++; /* add a pending slide_hash() */ + } + zmemcpy(s->window + s->strstart, s->strm->next_in - used, used); + s->strstart += used; + } + s->block_start = s->strstart; + s->insert += MIN(used, s->w_size - s->insert); + } + if (s->high_water < s->strstart) + s->high_water = s->strstart; + + /* If the last block was written to next_out, then done. */ + if (last) + return finish_done; + + /* If flushing and all input has been consumed, then done. */ + if (flush != Z_NO_FLUSH && flush != Z_FINISH && + s->strm->avail_in == 0 && (long)s->strstart == s->block_start) + return block_done; + + /* Fill the window with any remaining input. */ + have = s->window_size - s->strstart - 1; + if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) { + /* Slide the window down. */ + s->block_start -= s->w_size; + s->strstart -= s->w_size; + zmemcpy(s->window, s->window + s->w_size, s->strstart); + if (s->matches < 2) + s->matches++; /* add a pending slide_hash() */ + have += s->w_size; /* more space now */ + } + if (have > s->strm->avail_in) + have = s->strm->avail_in; + if (have) { + read_buf(s->strm, s->window + s->strstart, have); + s->strstart += have; + } + if (s->high_water < s->strstart) + s->high_water = s->strstart; + + /* There was not enough avail_out to write a complete worthy or flushed + * stored block to next_out. Write a stored block to pending instead, if we + * have enough input for a worthy block, or if flushing and there is enough + * room for the remaining input as a stored block in the pending buffer. + */ + have = (s->bi_valid + 42) >> 3; /* number of header bytes */ + /* maximum stored block length that will fit in pending: */ + have = MIN(s->pending_buf_size - have, MAX_STORED); + min_block = MIN(have, s->w_size); + left = s->strstart - s->block_start; + if (left >= min_block || + ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH && + s->strm->avail_in == 0 && left <= have)) { + len = MIN(left, have); + last = flush == Z_FINISH && s->strm->avail_in == 0 && + len == left ? 
1 : 0; + _tr_stored_block(s, (charf *)s->window + s->block_start, len, last); + s->block_start += len; + flush_pending(s->strm); + } + + /* We've done all we can with the available input and output. */ + return last ? finish_started : need_more; +} + +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ +local block_state deflate_fast(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head; /* head of the hash chain */ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = NIL; + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ + } + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->match_start, s->match_length); + + _tr_tally_dist(s, s->strstart - s->match_start, + s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ +#ifndef FASTEST + if (s->match_length <= s->max_insert_length && + s->lookahead >= MIN_MATCH) { + s->match_length--; /* string at strstart already in table */ + do { + s->strstart++; + INSERT_STRING(s, s->strstart, hash_head); + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s->match_length != 0); + s->strstart++; + } else +#endif + { + s->strstart += s->match_length; + s->match_length = 0; + s->ins_h = s->window[s->strstart]; + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. + */ + } + } else { + /* No match, output a literal byte */ + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + } + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = s->strstart < MIN_MATCH-1 ? 
s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} + +#ifndef FASTEST +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. + */ +local block_state deflate_slow(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head; /* head of hash chain */ + int bflush; /* set if current block must be flushed */ + + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = NIL; + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + */ + s->prev_length = s->match_length, s->prev_match = s->match_start; + s->match_length = MIN_MATCH-1; + + if (hash_head != NIL && s->prev_length < s->max_lazy_match && + s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ + + if (s->match_length <= 5 && (s->strategy == Z_FILTERED +#if TOO_FAR <= 32767 + || (s->match_length == MIN_MATCH && + s->strstart - s->match_start > TOO_FAR) +#endif + )) { + + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. + */ + s->match_length = MIN_MATCH-1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { + uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. */ + + check_match(s, s->strstart-1, s->prev_match, s->prev_length); + + _tr_tally_dist(s, s->strstart -1 - s->prev_match, + s->prev_length - MIN_MATCH, bflush); + + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s->lookahead -= s->prev_length-1; + s->prev_length -= 2; + do { + if (++s->strstart <= max_insert) { + INSERT_STRING(s, s->strstart, hash_head); + } + } while (--s->prev_length != 0); + s->match_available = 0; + s->match_length = MIN_MATCH-1; + s->strstart++; + + if (bflush) FLUSH_BLOCK(s, 0); + + } else if (s->match_available) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. 
+ */ + Tracevv((stderr,"%c", s->window[s->strstart-1])); + _tr_tally_lit(s, s->window[s->strstart-1], bflush); + if (bflush) { + FLUSH_BLOCK_ONLY(s, 0); + } + s->strstart++; + s->lookahead--; + if (s->strm->avail_out == 0) return need_more; + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s->match_available = 1; + s->strstart++; + s->lookahead--; + } + } + Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s->match_available) { + Tracevv((stderr,"%c", s->window[s->strstart-1])); + _tr_tally_lit(s, s->window[s->strstart-1], bflush); + s->match_available = 0; + } + s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} +#endif /* FASTEST */ + +/* =========================================================================== + * For Z_RLE, simply look for runs of bytes, generate matches only of distance + * one. Do not maintain a hash table. (It will be regenerated if this run of + * deflate switches away from Z_RLE.) + */ +local block_state deflate_rle(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + uInt prev; /* byte at distance one to match */ + Bytef *scan, *strend; /* scan goes up to strend for length of run */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the longest run, plus one for the unrolled loop. + */ + if (s->lookahead <= MAX_MATCH) { + fill_window(s); + if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* See how many times the previous byte repeats */ + s->match_length = 0; + if (s->lookahead >= MIN_MATCH && s->strstart > 0) { + scan = s->window + s->strstart - 1; + prev = *scan; + if (prev == *++scan && prev == *++scan && prev == *++scan) { + strend = s->window + s->strstart + MAX_MATCH; + do { + } while (prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + scan < strend); + s->match_length = MAX_MATCH - (uInt)(strend - scan); + if (s->match_length > s->lookahead) + s->match_length = s->lookahead; + } + Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + } + + /* Emit match if have run of MIN_MATCH or longer, else emit literal */ + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->strstart - 1, s->match_length); + + _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + s->strstart += s->match_length; + s->match_length = 0; + } else { + /* No match, output a literal byte */ + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + } + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) 
+ */ +local block_state deflate_huff(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. */ + if (s->lookahead == 0) { + fill_window(s); + if (s->lookahead == 0) { + if (flush == Z_NO_FLUSH) + return need_more; + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s->match_length = 0; + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} diff --git a/deps/zlib-1.2.11/src/deflate.h b/deps/zlib-1.2.11/src/deflate.h new file mode 100644 index 000000000000..23ecdd312bc0 --- /dev/null +++ b/deps/zlib-1.2.11/src/deflate.h @@ -0,0 +1,349 @@ +/* deflate.h -- internal compression state + * Copyright (C) 1995-2016 Jean-loup Gailly + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* @(#) $Id$ */ + +#ifndef DEFLATE_H +#define DEFLATE_H + +#include "zutil.h" + +/* define NO_GZIP when compiling if you want to disable gzip header and + trailer creation by deflate(). NO_GZIP would be used to avoid linking in + the crc code when it is not needed. For shared libraries, gzip encoding + should be left enabled. */ +#ifndef NO_GZIP +# define GZIP +#endif + +/* =========================================================================== + * Internal compression state. + */ + +#define LENGTH_CODES 29 +/* number of length codes, not counting the special END_BLOCK code */ + +#define LITERALS 256 +/* number of literal bytes 0..255 */ + +#define L_CODES (LITERALS+1+LENGTH_CODES) +/* number of Literal or Length codes, including the END_BLOCK code */ + +#define D_CODES 30 +/* number of distance codes */ + +#define BL_CODES 19 +/* number of codes used to transfer the bit lengths */ + +#define HEAP_SIZE (2*L_CODES+1) +/* maximum heap size */ + +#define MAX_BITS 15 +/* All codes must not exceed MAX_BITS bits */ + +#define Buf_size 16 +/* size of bit buffer in bi_buf */ + +#define INIT_STATE 42 /* zlib header -> BUSY_STATE */ +#ifdef GZIP +# define GZIP_STATE 57 /* gzip header -> BUSY_STATE | EXTRA_STATE */ +#endif +#define EXTRA_STATE 69 /* gzip extra block -> NAME_STATE */ +#define NAME_STATE 73 /* gzip file name -> COMMENT_STATE */ +#define COMMENT_STATE 91 /* gzip comment -> HCRC_STATE */ +#define HCRC_STATE 103 /* gzip header CRC -> BUSY_STATE */ +#define BUSY_STATE 113 /* deflate -> FINISH_STATE */ +#define FINISH_STATE 666 /* stream complete */ +/* Stream status */ + + +/* Data structure describing a single value and its code string. 
*/ +typedef struct ct_data_s { + union { + ush freq; /* frequency count */ + ush code; /* bit string */ + } fc; + union { + ush dad; /* father node in Huffman tree */ + ush len; /* length of bit string */ + } dl; +} FAR ct_data; + +#define Freq fc.freq +#define Code fc.code +#define Dad dl.dad +#define Len dl.len + +typedef struct static_tree_desc_s static_tree_desc; + +typedef struct tree_desc_s { + ct_data *dyn_tree; /* the dynamic tree */ + int max_code; /* largest code with non zero frequency */ + const static_tree_desc *stat_desc; /* the corresponding static tree */ +} FAR tree_desc; + +typedef ush Pos; +typedef Pos FAR Posf; +typedef unsigned IPos; + +/* A Pos is an index in the character window. We use short instead of int to + * save space in the various tables. IPos is used only for parameter passing. + */ + +typedef struct internal_state { + z_streamp strm; /* pointer back to this zlib stream */ + int status; /* as the name implies */ + Bytef *pending_buf; /* output still pending */ + ulg pending_buf_size; /* size of pending_buf */ + Bytef *pending_out; /* next pending byte to output to the stream */ + ulg pending; /* nb of bytes in the pending buffer */ + int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ + gz_headerp gzhead; /* gzip header information to write */ + ulg gzindex; /* where in extra, name, or comment */ + Byte method; /* can only be DEFLATED */ + int last_flush; /* value of flush param for previous deflate call */ + + /* used by deflate.c: */ + + uInt w_size; /* LZ77 window size (32K by default) */ + uInt w_bits; /* log2(w_size) (8..16) */ + uInt w_mask; /* w_size - 1 */ + + Bytef *window; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. Also, it limits + * the window size to 64K, which is quite useful on MSDOS. + * To do: use the user input buffer as sliding window. + */ + + ulg window_size; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ + + Posf *prev; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + + Posf *head; /* Heads of the hash chains or NIL. */ + + uInt ins_h; /* hash index of string to be inserted */ + uInt hash_size; /* number of elements in hash table */ + uInt hash_bits; /* log2(hash_size) */ + uInt hash_mask; /* hash_size-1 */ + + uInt hash_shift; + /* Number of bits by which ins_h must be shifted at each input + * step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ + + long block_start; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + + uInt match_length; /* length of best match */ + IPos prev_match; /* previous match */ + int match_available; /* set if previous match exists */ + uInt strstart; /* start of string to insert */ + uInt match_start; /* start of matching string */ + uInt lookahead; /* number of valid bytes ahead in window */ + + uInt prev_length; + /* Length of the best match at previous step. 
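/* Worked instance of the hash_shift constraint above (illustrative, assuming
 * the default memLevel of 8): deflateInit2() then uses hash_bits = 8 + 7 = 15,
 * and with MIN_MATCH = 3 the smallest shift satisfying
 * hash_shift * MIN_MATCH >= hash_bits is (15 + 3 - 1) / 3 = 5, which is the
 * value the initialization code computes. */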
Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + uInt max_chain_length; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ + + uInt max_lazy_match; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. This mechanism is used only for compression + * levels >= 4. + */ +# define max_insert_length max_lazy_match + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. + */ + + int level; /* compression level (1..9) */ + int strategy; /* favor or force Huffman coding*/ + + uInt good_match; + /* Use a faster search when the previous match is longer than this */ + + int nice_match; /* Stop searching when current match exceeds this */ + + /* used by trees.c: */ + /* Didn't use ct_data typedef below to suppress compiler warning */ + struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + + struct tree_desc_s l_desc; /* desc. for literal tree */ + struct tree_desc_s d_desc; /* desc. for distance tree */ + struct tree_desc_s bl_desc; /* desc. for bit length tree */ + + ush bl_count[MAX_BITS+1]; + /* number of codes at each bit length for an optimal tree */ + + int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + int heap_len; /* number of elements in the heap */ + int heap_max; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + uch depth[2*L_CODES+1]; + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + + uchf *l_buf; /* buffer for literals or lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ + + uInt last_lit; /* running index in l_buf */ + + ushf *d_buf; + /* Buffer for distances. To simplify the code, d_buf and l_buf have + * the same number of elements. To use different lengths, an extra flag + * array would be necessary. 
+ */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ + uInt matches; /* number of string matches in current block */ + uInt insert; /* bytes at end of window left to insert */ + +#ifdef ZLIB_DEBUG + ulg compressed_len; /* total bit length of compressed file mod 2^32 */ + ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ +#endif + + ush bi_buf; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). + */ + int bi_valid; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. + */ + + ulg high_water; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ + +} FAR deflate_state; + +/* Output a byte on the stream. + * IN assertion: there is enough room in pending_buf. + */ +#define put_byte(s, c) {s->pending_buf[s->pending++] = (Bytef)(c);} + + +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) +/* Minimum amount of lookahead, except at the end of the input file. + * See deflate.c for comments about the MIN_MATCH+1. + */ + +#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) +/* In order to simplify the code, particularly on 16 bit machines, match + * distances are limited to MAX_DIST instead of WSIZE. + */ + +#define WIN_INIT MAX_MATCH +/* Number of bytes after end of data in window to initialize in order to avoid + memory checker errors from longest match routines */ + + /* in trees.c */ +void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); +int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); +void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); +void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); + +#define d_code(dist) \ + ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) +/* Mapping from a distance to a distance code. dist is the distance - 1 and + * must not have side effects. _dist_code[256] and _dist_code[257] are never + * used. 
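/* Plugging in the usual constants (illustrative): MAX_MATCH = 258 and
 * MIN_MATCH = 3 give MIN_LOOKAHEAD = 258 + 3 + 1 = 262, so for the default
 * 32 KB window MAX_DIST(s) = 32768 - 262 = 32506 -- match distances stop
 * slightly short of the full window size. */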
+ */ + +#ifndef ZLIB_DEBUG +/* Inline versions of _tr_tally for speed: */ + +#if defined(GEN_TREES_H) || !defined(STDC) + extern uch ZLIB_INTERNAL _length_code[]; + extern uch ZLIB_INTERNAL _dist_code[]; +#else + extern const uch ZLIB_INTERNAL _length_code[]; + extern const uch ZLIB_INTERNAL _dist_code[]; +#endif + +# define _tr_tally_lit(s, c, flush) \ + { uch cc = (c); \ + s->d_buf[s->last_lit] = 0; \ + s->l_buf[s->last_lit++] = cc; \ + s->dyn_ltree[cc].Freq++; \ + flush = (s->last_lit == s->lit_bufsize-1); \ + } +# define _tr_tally_dist(s, distance, length, flush) \ + { uch len = (uch)(length); \ + ush dist = (ush)(distance); \ + s->d_buf[s->last_lit] = dist; \ + s->l_buf[s->last_lit++] = len; \ + dist--; \ + s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ + s->dyn_dtree[d_code(dist)].Freq++; \ + flush = (s->last_lit == s->lit_bufsize-1); \ + } +#else +# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) +# define _tr_tally_dist(s, distance, length, flush) \ + flush = _tr_tally(s, distance, length) +#endif + +#endif /* DEFLATE_H */ diff --git a/deps/zlib-1.2.11/src/gzclose.c b/deps/zlib-1.2.11/src/gzclose.c new file mode 100644 index 000000000000..caeb99a3177f --- /dev/null +++ b/deps/zlib-1.2.11/src/gzclose.c @@ -0,0 +1,25 @@ +/* gzclose.c -- zlib gzclose() function + * Copyright (C) 2004, 2010 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* gzclose() is in a separate file so that it is linked in only if it is used. + That way the other gzclose functions can be used instead to avoid linking in + unneeded compression or decompression routines. */ +int ZEXPORT gzclose(file) + gzFile file; +{ +#ifndef NO_GZCOMPRESS + gz_statep state; + + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + return state->mode == GZ_READ ? 
gzclose_r(file) : gzclose_w(file);
+#else
+    return gzclose_r(file);
+#endif
+}
diff --git a/deps/zlib-1.2.11/src/gzguts.h b/deps/zlib-1.2.11/src/gzguts.h
new file mode 100644
index 000000000000..990a4d251493
--- /dev/null
+++ b/deps/zlib-1.2.11/src/gzguts.h
@@ -0,0 +1,218 @@
+/* gzguts.h -- zlib internal header definitions for gz* operations
+ * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#ifdef _LARGEFILE64_SOURCE
+#  ifndef _LARGEFILE_SOURCE
+#    define _LARGEFILE_SOURCE 1
+#  endif
+#  ifdef _FILE_OFFSET_BITS
+#    undef _FILE_OFFSET_BITS
+#  endif
+#endif
+
+#ifdef HAVE_HIDDEN
+#  define ZLIB_INTERNAL __attribute__((visibility ("hidden")))
+#else
+#  define ZLIB_INTERNAL
+#endif
+
+#include <stdio.h>
+#include "zlib.h"
+#ifdef STDC
+#  include <string.h>
+#  include <stdlib.h>
+#  include <limits.h>
+#endif
+
+#ifndef _POSIX_SOURCE
+#  define _POSIX_SOURCE
+#endif
+#include <fcntl.h>
+
+#ifdef _WIN32
+#  include <stddef.h>
+#endif
+
+#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32)
+#  include <io.h>
+#endif
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+#  define WIDECHAR
+#endif
+
+#ifdef WINAPI_FAMILY
+#  define open _open
+#  define read _read
+#  define write _write
+#  define close _close
+#endif
+
+#ifdef NO_DEFLATE       /* for compatibility with old definition */
+#  define NO_GZCOMPRESS
+#endif
+
+#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550)
+#  ifndef HAVE_VSNPRINTF
+#    define HAVE_VSNPRINTF
+#  endif
+#endif
+
+#if defined(__CYGWIN__)
+#  ifndef HAVE_VSNPRINTF
+#    define HAVE_VSNPRINTF
+#  endif
+#endif
+
+#if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410)
+#  ifndef HAVE_VSNPRINTF
+#    define HAVE_VSNPRINTF
+#  endif
+#endif
+
+#ifndef HAVE_VSNPRINTF
+#  ifdef MSDOS
+/* vsnprintf may exist on some MS-DOS compilers (DJGPP?),
+   but for now we just assume it doesn't. */
+#    define NO_vsnprintf
+#  endif
+#  ifdef __TURBOC__
+#    define NO_vsnprintf
+#  endif
+#  ifdef WIN32
+/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */
+#    if !defined(vsnprintf) && !defined(NO_vsnprintf)
+#      if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 )
+#        define vsnprintf _vsnprintf
+#      endif
+#    endif
+#  endif
+#  ifdef __SASC
+#    define NO_vsnprintf
+#  endif
+#  ifdef VMS
+#    define NO_vsnprintf
+#  endif
+#  ifdef __OS400__
+#    define NO_vsnprintf
+#  endif
+#  ifdef __MVS__
+#    define NO_vsnprintf
+#  endif
+#endif
+
+/* unlike snprintf (which is required in C99), _snprintf does not guarantee
+   null termination of the result -- however this is only used in gzlib.c where
+   the result is assured to fit in the space provided */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#  define snprintf _snprintf
+#endif
+
+#ifndef local
+#  define local static
+#endif
+/* since "static" is used to mean two completely different things in C, we
+   define "local" for the non-static meaning of "static", for readability
+   (compile with -Dlocal if your debugger can't find static symbols) */
+
+/* gz* functions always use library allocation functions */
+#ifndef STDC
+  extern voidp  malloc OF((uInt size));
+  extern void   free   OF((voidpf ptr));
+#endif
+
+/* get errno and strerror definition */
+#if defined UNDER_CE
+#  include <windows.h>
+#  define zstrerror() gz_strwinerror((DWORD)GetLastError())
+#else
+#  ifndef NO_STRERROR
+#    include <errno.h>
+#    define zstrerror() strerror(errno)
+#  else
+#    define zstrerror() "stdio error (consult errno)"
+#  endif
+#endif
+
+/* provide prototypes for these when building zlib without LFS */
+#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0
+    ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
+    ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));
+    ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile));
+    ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile));
+#endif
+
+/* default memLevel */
+#if MAX_MEM_LEVEL >= 8
+#  define DEF_MEM_LEVEL 8
+#else
+#  define DEF_MEM_LEVEL  MAX_MEM_LEVEL
+#endif
+
+/* default i/o buffer size -- double this for output when reading (this and
+   twice this must be able to fit in an unsigned type) */
+#define GZBUFSIZE 8192
+
+/* gzip modes, also provide a little integrity check on the passed structure */
+#define GZ_NONE 0
+#define GZ_READ 7247
+#define GZ_WRITE 31153
+#define GZ_APPEND 1     /* mode set to GZ_WRITE after the file is opened */
+
+/* values for gz_state how */
+#define LOOK 0      /* look for a gzip header */
+#define COPY 1      /* copy input directly */
+#define GZIP 2      /* decompress a gzip stream */
+
+/* internal gzip file state data structure */
+typedef struct {
+        /* exposed contents for gzgetc() macro */
+    struct gzFile_s x;      /* "x" for exposed */
+                            /* x.have: number of bytes available at x.next */
+                            /* x.next: next output data to deliver or write */
+                            /* x.pos: current position in uncompressed data */
+        /* used for both reading and writing */
+    int mode;               /* see gzip modes above */
+    int fd;                 /* file descriptor */
+    char *path;             /* path or fd for error messages */
+    unsigned size;          /* buffer size, zero if not allocated yet */
+    unsigned want;          /* requested buffer size, default is GZBUFSIZE */
+    unsigned char *in;      /* input buffer (double-sized when writing) */
+    unsigned char *out;     /* output buffer (double-sized when reading) */
+    int direct;             /* 0 if processing gzip, 1 if transparent */
+        /* just for reading */
+    int how;                /* 0: get header, 1: copy, 2: decompress */
+    z_off64_t start;        /* where the gzip data started, for rewinding */
+    int eof;                /* true if end of input file reached */
+    int past;               /* true if read requested past end */
+        /* just for writing */
+    int level;              /* compression
level */ + int strategy; /* compression strategy */ + /* seek request */ + z_off64_t skip; /* amount to skip (already rewound if backwards) */ + int seek; /* true if seek request pending */ + /* error information */ + int err; /* error code */ + char *msg; /* error message */ + /* zlib inflate or deflate stream */ + z_stream strm; /* stream structure in-place (not a pointer) */ +} gz_state; +typedef gz_state FAR *gz_statep; + +/* shared functions */ +void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); +#if defined UNDER_CE +char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); +#endif + +/* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t + value -- needed when comparing unsigned to z_off64_t, which is signed + (possible z_off64_t types off_t, off64_t, and long are all signed) */ +#ifdef INT_MAX +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) +#else +unsigned ZLIB_INTERNAL gz_intmax OF((void)); +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) +#endif diff --git a/deps/zlib-1.2.11/src/gzlib.c b/deps/zlib-1.2.11/src/gzlib.c new file mode 100644 index 000000000000..4105e6aff925 --- /dev/null +++ b/deps/zlib-1.2.11/src/gzlib.c @@ -0,0 +1,637 @@ +/* gzlib.c -- zlib functions common to reading and writing gzip files + * Copyright (C) 2004-2017 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +#if defined(_WIN32) && !defined(__BORLANDC__) && !defined(__MINGW32__) +# define LSEEK _lseeki64 +#else +#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0 +# define LSEEK lseek64 +#else +# define LSEEK lseek +#endif +#endif + +/* Local functions */ +local void gz_reset OF((gz_statep)); +local gzFile gz_open OF((const void *, int, const char *)); + +#if defined UNDER_CE + +/* Map the Windows error number in ERROR to a locale-dependent error message + string and return a pointer to it. Typically, the values for ERROR come + from GetLastError. + + The string pointed to shall not be modified by the application, but may be + overwritten by a subsequent call to gz_strwinerror + + The gz_strwinerror function does not change the current setting of + GetLastError. */ +char ZLIB_INTERNAL *gz_strwinerror (error) + DWORD error; +{ + static char buf[1024]; + + wchar_t *msgbuf; + DWORD lasterr = GetLastError(); + DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM + | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, + error, + 0, /* Default language */ + (LPVOID)&msgbuf, + 0, + NULL); + if (chars != 0) { + /* If there is an \r\n appended, zap it. */ + if (chars >= 2 + && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') { + chars -= 2; + msgbuf[chars] = 0; + } + + if (chars > sizeof (buf) - 1) { + chars = sizeof (buf) - 1; + msgbuf[chars] = 0; + } + + wcstombs(buf, msgbuf, chars + 1); + LocalFree(msgbuf); + } + else { + sprintf(buf, "unknown win32 error (%ld)", error); + } + + SetLastError(lasterr); + return buf; +} + +#endif /* UNDER_CE */ + +/* Reset gzip file state */ +local void gz_reset(state) + gz_statep state; +{ + state->x.have = 0; /* no output data available */ + if (state->mode == GZ_READ) { /* for reading ... 
*/ + state->eof = 0; /* not at end of file */ + state->past = 0; /* have not read past end yet */ + state->how = LOOK; /* look for gzip header */ + } + state->seek = 0; /* no seek request pending */ + gz_error(state, Z_OK, NULL); /* clear error */ + state->x.pos = 0; /* no uncompressed data yet */ + state->strm.avail_in = 0; /* no input data yet */ +} + +/* Open a gzip file either by name or file descriptor. */ +local gzFile gz_open(path, fd, mode) + const void *path; + int fd; + const char *mode; +{ + gz_statep state; + z_size_t len; + int oflag; +#ifdef O_CLOEXEC + int cloexec = 0; +#endif +#ifdef O_EXCL + int exclusive = 0; +#endif + + /* check input */ + if (path == NULL) + return NULL; + + /* allocate gzFile structure to return */ + state = (gz_statep)malloc(sizeof(gz_state)); + if (state == NULL) + return NULL; + state->size = 0; /* no buffers allocated yet */ + state->want = GZBUFSIZE; /* requested buffer size */ + state->msg = NULL; /* no error message yet */ + + /* interpret mode */ + state->mode = GZ_NONE; + state->level = Z_DEFAULT_COMPRESSION; + state->strategy = Z_DEFAULT_STRATEGY; + state->direct = 0; + while (*mode) { + if (*mode >= '0' && *mode <= '9') + state->level = *mode - '0'; + else + switch (*mode) { + case 'r': + state->mode = GZ_READ; + break; +#ifndef NO_GZCOMPRESS + case 'w': + state->mode = GZ_WRITE; + break; + case 'a': + state->mode = GZ_APPEND; + break; +#endif + case '+': /* can't read and write at the same time */ + free(state); + return NULL; + case 'b': /* ignore -- will request binary anyway */ + break; +#ifdef O_CLOEXEC + case 'e': + cloexec = 1; + break; +#endif +#ifdef O_EXCL + case 'x': + exclusive = 1; + break; +#endif + case 'f': + state->strategy = Z_FILTERED; + break; + case 'h': + state->strategy = Z_HUFFMAN_ONLY; + break; + case 'R': + state->strategy = Z_RLE; + break; + case 'F': + state->strategy = Z_FIXED; + break; + case 'T': + state->direct = 1; + break; + default: /* could consider as an error, but just ignore */ + ; + } + mode++; + } + + /* must provide an "r", "w", or "a" */ + if (state->mode == GZ_NONE) { + free(state); + return NULL; + } + + /* can't force transparent read */ + if (state->mode == GZ_READ) { + if (state->direct) { + free(state); + return NULL; + } + state->direct = 1; /* for empty file */ + } + + /* save the path name for error messages */ +#ifdef WIDECHAR + if (fd == -2) { + len = wcstombs(NULL, path, 0); + if (len == (z_size_t)-1) + len = 0; + } + else +#endif + len = strlen((const char *)path); + state->path = (char *)malloc(len + 1); + if (state->path == NULL) { + free(state); + return NULL; + } +#ifdef WIDECHAR + if (fd == -2) + if (len) + wcstombs(state->path, path, len + 1); + else + *(state->path) = 0; + else +#endif +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + (void)snprintf(state->path, len + 1, "%s", (const char *)path); +#else + strcpy(state->path, path); +#endif + + /* compute the flags for open() */ + oflag = +#ifdef O_LARGEFILE + O_LARGEFILE | +#endif +#ifdef O_BINARY + O_BINARY | +#endif +#ifdef O_CLOEXEC + (cloexec ? O_CLOEXEC : 0) | +#endif + (state->mode == GZ_READ ? + O_RDONLY : + (O_WRONLY | O_CREAT | +#ifdef O_EXCL + (exclusive ? O_EXCL : 0) | +#endif + (state->mode == GZ_WRITE ? + O_TRUNC : + O_APPEND))); + + /* open the file with the appropriate flags (or just use fd) */ + state->fd = fd > -1 ? fd : ( +#ifdef WIDECHAR + fd == -2 ? 
_wopen(path, oflag, 0666) : +#endif + open((const char *)path, oflag, 0666)); + if (state->fd == -1) { + free(state->path); + free(state); + return NULL; + } + if (state->mode == GZ_APPEND) { + LSEEK(state->fd, 0, SEEK_END); /* so gzoffset() is correct */ + state->mode = GZ_WRITE; /* simplify later checks */ + } + + /* save the current position for rewinding (only if reading) */ + if (state->mode == GZ_READ) { + state->start = LSEEK(state->fd, 0, SEEK_CUR); + if (state->start == -1) state->start = 0; + } + + /* initialize stream */ + gz_reset(state); + + /* return stream */ + return (gzFile)state; +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzopen(path, mode) + const char *path; + const char *mode; +{ + return gz_open(path, -1, mode); +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzopen64(path, mode) + const char *path; + const char *mode; +{ + return gz_open(path, -1, mode); +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzdopen(fd, mode) + int fd; + const char *mode; +{ + char *path; /* identifier for error messages */ + gzFile gz; + + if (fd == -1 || (path = (char *)malloc(7 + 3 * sizeof(int))) == NULL) + return NULL; +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + (void)snprintf(path, 7 + 3 * sizeof(int), "", fd); +#else + sprintf(path, "", fd); /* for debugging */ +#endif + gz = gz_open(path, fd, mode); + free(path); + return gz; +} + +/* -- see zlib.h -- */ +#ifdef WIDECHAR +gzFile ZEXPORT gzopen_w(path, mode) + const wchar_t *path; + const char *mode; +{ + return gz_open(path, -2, mode); +} +#endif + +/* -- see zlib.h -- */ +int ZEXPORT gzbuffer(file, size) + gzFile file; + unsigned size; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* make sure we haven't already allocated memory */ + if (state->size != 0) + return -1; + + /* check and set requested size */ + if ((size << 1) < size) + return -1; /* need to be able to double it */ + if (size < 2) + size = 2; /* need two bytes to check magic header */ + state->want = size; + return 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzrewind(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* back up and start over */ + if (LSEEK(state->fd, state->start, SEEK_SET) == -1) + return -1; + gz_reset(state); + return 0; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gzseek64(file, offset, whence) + gzFile file; + z_off64_t offset; + int whence; +{ + unsigned n; + z_off64_t ret; + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* check that there's no error */ + if (state->err != Z_OK && state->err != Z_BUF_ERROR) + return -1; + + /* can only seek from start or relative to current position */ + if (whence != SEEK_SET && whence != SEEK_CUR) + return -1; + + /* normalize offset to a SEEK_CUR specification */ + if (whence == SEEK_SET) + offset -= state->x.pos; + else if (state->seek) + offset += state->skip; + state->seek = 0; + + /* if within raw area while reading, just go there */ + if (state->mode == GZ_READ && state->how == COPY && + state->x.pos + offset >= 0) { + ret = 
LSEEK(state->fd, offset - state->x.have, SEEK_CUR); + if (ret == -1) + return -1; + state->x.have = 0; + state->eof = 0; + state->past = 0; + state->seek = 0; + gz_error(state, Z_OK, NULL); + state->strm.avail_in = 0; + state->x.pos += offset; + return state->x.pos; + } + + /* calculate skip amount, rewinding if needed for back seek when reading */ + if (offset < 0) { + if (state->mode != GZ_READ) /* writing -- can't go backwards */ + return -1; + offset += state->x.pos; + if (offset < 0) /* before start of file! */ + return -1; + if (gzrewind(file) == -1) /* rewind, then skip to offset */ + return -1; + } + + /* if reading, skip what's in output buffer (one less gzgetc() check) */ + if (state->mode == GZ_READ) { + n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > offset ? + (unsigned)offset : state->x.have; + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + offset -= n; + } + + /* request skip (if not zero) */ + if (offset) { + state->seek = 1; + state->skip = offset; + } + return state->x.pos + offset; +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gzseek(file, offset, whence) + gzFile file; + z_off_t offset; + int whence; +{ + z_off64_t ret; + + ret = gzseek64(file, (z_off64_t)offset, whence); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gztell64(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* return position */ + return state->x.pos + (state->seek ? state->skip : 0); +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gztell(file) + gzFile file; +{ + z_off64_t ret; + + ret = gztell64(file); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gzoffset64(file) + gzFile file; +{ + z_off64_t offset; + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* compute and return effective offset in file */ + offset = LSEEK(state->fd, 0, SEEK_CUR); + if (offset == -1) + return -1; + if (state->mode == GZ_READ) /* reading */ + offset -= state->strm.avail_in; /* don't count buffered input */ + return offset; +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gzoffset(file) + gzFile file; +{ + z_off64_t ret; + + ret = gzoffset64(file); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzeof(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return 0; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return 0; + + /* return end-of-file state */ + return state->mode == GZ_READ ? state->past : 0; +} + +/* -- see zlib.h -- */ +const char * ZEXPORT gzerror(file, errnum) + gzFile file; + int *errnum; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return NULL; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return NULL; + + /* return error information */ + if (errnum != NULL) + *errnum = state->err; + return state->err == Z_MEM_ERROR ? "out of memory" : + (state->msg == NULL ? 
"" : state->msg); +} + +/* -- see zlib.h -- */ +void ZEXPORT gzclearerr(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return; + + /* clear error and end-of-file */ + if (state->mode == GZ_READ) { + state->eof = 0; + state->past = 0; + } + gz_error(state, Z_OK, NULL); +} + +/* Create an error message in allocated memory and set state->err and + state->msg accordingly. Free any previous error message already there. Do + not try to free or allocate space if the error is Z_MEM_ERROR (out of + memory). Simply save the error message as a static string. If there is an + allocation failure constructing the error message, then convert the error to + out of memory. */ +void ZLIB_INTERNAL gz_error(state, err, msg) + gz_statep state; + int err; + const char *msg; +{ + /* free previously allocated message and clear */ + if (state->msg != NULL) { + if (state->err != Z_MEM_ERROR) + free(state->msg); + state->msg = NULL; + } + + /* if fatal, set state->x.have to 0 so that the gzgetc() macro fails */ + if (err != Z_OK && err != Z_BUF_ERROR) + state->x.have = 0; + + /* set error code, and if no message, then done */ + state->err = err; + if (msg == NULL) + return; + + /* for an out of memory error, return literal string when requested */ + if (err == Z_MEM_ERROR) + return; + + /* construct error message with path */ + if ((state->msg = (char *)malloc(strlen(state->path) + strlen(msg) + 3)) == + NULL) { + state->err = Z_MEM_ERROR; + return; + } +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + (void)snprintf(state->msg, strlen(state->path) + strlen(msg) + 3, + "%s%s%s", state->path, ": ", msg); +#else + strcpy(state->msg, state->path); + strcat(state->msg, ": "); + strcat(state->msg, msg); +#endif +} + +#ifndef INT_MAX +/* portably return maximum value for an int (when limits.h presumed not + available) -- we need to do this to cover cases where 2's complement not + used, since C standard permits 1's complement and sign-bit representations, + otherwise we could just use ((unsigned)-1) >> 1 */ +unsigned ZLIB_INTERNAL gz_intmax() +{ + unsigned p, q; + + p = 1; + do { + q = p; + p <<= 1; + p++; + } while (p > q); + return q >> 1; +} +#endif diff --git a/deps/zlib-1.2.11/src/gzread.c b/deps/zlib-1.2.11/src/gzread.c new file mode 100644 index 000000000000..956b91ea7d9e --- /dev/null +++ b/deps/zlib-1.2.11/src/gzread.c @@ -0,0 +1,654 @@ +/* gzread.c -- zlib functions for reading gzip files + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* Local functions */ +local int gz_load OF((gz_statep, unsigned char *, unsigned, unsigned *)); +local int gz_avail OF((gz_statep)); +local int gz_look OF((gz_statep)); +local int gz_decomp OF((gz_statep)); +local int gz_fetch OF((gz_statep)); +local int gz_skip OF((gz_statep, z_off64_t)); +local z_size_t gz_read OF((gz_statep, voidp, z_size_t)); + +/* Use read() to load a buffer -- return -1 on error, otherwise 0. Read from + state->fd, and update state->eof, state->err, and state->msg as appropriate. + This function needs to loop on read(), since read() is not guaranteed to + read the number of bytes requested, depending on the type of descriptor. 
*/ +local int gz_load(state, buf, len, have) + gz_statep state; + unsigned char *buf; + unsigned len; + unsigned *have; +{ + int ret; + unsigned get, max = ((unsigned)-1 >> 2) + 1; + + *have = 0; + do { + get = len - *have; + if (get > max) + get = max; + ret = read(state->fd, buf + *have, get); + if (ret <= 0) + break; + *have += (unsigned)ret; + } while (*have < len); + if (ret < 0) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + if (ret == 0) + state->eof = 1; + return 0; +} + +/* Load up input buffer and set eof flag if last data loaded -- return -1 on + error, 0 otherwise. Note that the eof flag is set when the end of the input + file is reached, even though there may be unused data in the buffer. Once + that data has been used, no more attempts will be made to read the file. + If strm->avail_in != 0, then the current data is moved to the beginning of + the input buffer, and then the remainder of the buffer is loaded with the + available data from the input file. */ +local int gz_avail(state) + gz_statep state; +{ + unsigned got; + z_streamp strm = &(state->strm); + + if (state->err != Z_OK && state->err != Z_BUF_ERROR) + return -1; + if (state->eof == 0) { + if (strm->avail_in) { /* copy what's there to the start */ + unsigned char *p = state->in; + unsigned const char *q = strm->next_in; + unsigned n = strm->avail_in; + do { + *p++ = *q++; + } while (--n); + } + if (gz_load(state, state->in + strm->avail_in, + state->size - strm->avail_in, &got) == -1) + return -1; + strm->avail_in += got; + strm->next_in = state->in; + } + return 0; +} + +/* Look for gzip header, set up for inflate or copy. state->x.have must be 0. + If this is the first time in, allocate required memory. state->how will be + left unchanged if there is no more input data available, will be set to COPY + if there is no gzip header and direct copying will be performed, or it will + be set to GZIP for decompression. If direct copying, then leftover input + data from the input buffer will be copied to the output buffer. In that + case, all further file reads will be directly to either the output buffer or + a user buffer. If decompressing, the inflate state will be initialized. + gz_look() will return 0 on success or -1 on failure. 
*/ +local int gz_look(state) + gz_statep state; +{ + z_streamp strm = &(state->strm); + + /* allocate read buffers and inflate memory */ + if (state->size == 0) { + /* allocate buffers */ + state->in = (unsigned char *)malloc(state->want); + state->out = (unsigned char *)malloc(state->want << 1); + if (state->in == NULL || state->out == NULL) { + free(state->out); + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + state->size = state->want; + + /* allocate inflate memory */ + state->strm.zalloc = Z_NULL; + state->strm.zfree = Z_NULL; + state->strm.opaque = Z_NULL; + state->strm.avail_in = 0; + state->strm.next_in = Z_NULL; + if (inflateInit2(&(state->strm), 15 + 16) != Z_OK) { /* gunzip */ + free(state->out); + free(state->in); + state->size = 0; + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + } + + /* get at least the magic bytes in the input buffer */ + if (strm->avail_in < 2) { + if (gz_avail(state) == -1) + return -1; + if (strm->avail_in == 0) + return 0; + } + + /* look for gzip magic bytes -- if there, do gzip decoding (note: there is + a logical dilemma here when considering the case of a partially written + gzip file, to wit, if a single 31 byte is written, then we cannot tell + whether this is a single-byte file, or just a partially written gzip + file -- for here we assume that if a gzip file is being written, then + the header will be written in a single operation, so that reading a + single byte is sufficient indication that it is not a gzip file) */ + if (strm->avail_in > 1 && + strm->next_in[0] == 31 && strm->next_in[1] == 139) { + inflateReset(strm); + state->how = GZIP; + state->direct = 0; + return 0; + } + + /* no gzip header -- if we were decoding gzip before, then this is trailing + garbage. Ignore the trailing garbage and finish. */ + if (state->direct == 0) { + strm->avail_in = 0; + state->eof = 1; + state->x.have = 0; + return 0; + } + + /* doing raw i/o, copy any leftover input to output -- this assumes that + the output buffer is larger than the input buffer, which also assures + space for gzungetc() */ + state->x.next = state->out; + if (strm->avail_in) { + memcpy(state->x.next, strm->next_in, strm->avail_in); + state->x.have = strm->avail_in; + strm->avail_in = 0; + } + state->how = COPY; + state->direct = 1; + return 0; +} + +/* Decompress from input to the provided next_out and avail_out in the state. + On return, state->x.have and state->x.next point to the just decompressed + data. If the gzip stream completes, state->how is reset to LOOK to look for + the next gzip stream or raw data, once state->x.have is depleted. Returns 0 + on success, -1 on failure. */ +local int gz_decomp(state) + gz_statep state; +{ + int ret = Z_OK; + unsigned had; + z_streamp strm = &(state->strm); + + /* fill output buffer up to end of deflate stream */ + had = strm->avail_out; + do { + /* get more input for inflate() */ + if (strm->avail_in == 0 && gz_avail(state) == -1) + return -1; + if (strm->avail_in == 0) { + gz_error(state, Z_BUF_ERROR, "unexpected end of file"); + break; + } + + /* decompress and handle errors */ + ret = inflate(strm, Z_NO_FLUSH); + if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT) { + gz_error(state, Z_STREAM_ERROR, + "internal error: inflate stream corrupt"); + return -1; + } + if (ret == Z_MEM_ERROR) { + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + if (ret == Z_DATA_ERROR) { /* deflate stream invalid */ + gz_error(state, Z_DATA_ERROR, + strm->msg == NULL ? 
"compressed data error" : strm->msg); + return -1; + } + } while (strm->avail_out && ret != Z_STREAM_END); + + /* update available output */ + state->x.have = had - strm->avail_out; + state->x.next = strm->next_out - state->x.have; + + /* if the gzip stream completed successfully, look for another */ + if (ret == Z_STREAM_END) + state->how = LOOK; + + /* good decompression */ + return 0; +} + +/* Fetch data and put it in the output buffer. Assumes state->x.have is 0. + Data is either copied from the input file or decompressed from the input + file depending on state->how. If state->how is LOOK, then a gzip header is + looked for to determine whether to copy or decompress. Returns -1 on error, + otherwise 0. gz_fetch() will leave state->how as COPY or GZIP unless the + end of the input file has been reached and all data has been processed. */ +local int gz_fetch(state) + gz_statep state; +{ + z_streamp strm = &(state->strm); + + do { + switch(state->how) { + case LOOK: /* -> LOOK, COPY (only if never GZIP), or GZIP */ + if (gz_look(state) == -1) + return -1; + if (state->how == LOOK) + return 0; + break; + case COPY: /* -> COPY */ + if (gz_load(state, state->out, state->size << 1, &(state->x.have)) + == -1) + return -1; + state->x.next = state->out; + return 0; + case GZIP: /* -> GZIP or LOOK (if end of gzip stream) */ + strm->avail_out = state->size << 1; + strm->next_out = state->out; + if (gz_decomp(state) == -1) + return -1; + } + } while (state->x.have == 0 && (!state->eof || strm->avail_in)); + return 0; +} + +/* Skip len uncompressed bytes of output. Return -1 on error, 0 on success. */ +local int gz_skip(state, len) + gz_statep state; + z_off64_t len; +{ + unsigned n; + + /* skip over len bytes or reach end-of-file, whichever comes first */ + while (len) + /* skip over whatever is in output buffer */ + if (state->x.have) { + n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > len ? + (unsigned)len : state->x.have; + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + len -= n; + } + + /* output buffer empty -- return if we're at the end of the input */ + else if (state->eof && state->strm.avail_in == 0) + break; + + /* need more data to skip -- load up output buffer */ + else { + /* get more output, looking for header if required */ + if (gz_fetch(state) == -1) + return -1; + } + return 0; +} + +/* Read len bytes into buf from file, or less than len up to the end of the + input. Return the number of bytes read. If zero is returned, either the + end of file was reached, or there was an error. state->err must be + consulted in that case to determine which. 
*/ +local z_size_t gz_read(state, buf, len) + gz_statep state; + voidp buf; + z_size_t len; +{ + z_size_t got; + unsigned n; + + /* if len is zero, avoid unnecessary operations */ + if (len == 0) + return 0; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return 0; + } + + /* get len bytes to buf, or less than len if at the end */ + got = 0; + do { + /* set n to the maximum amount of len that fits in an unsigned int */ + n = -1; + if (n > len) + n = len; + + /* first just try copying data from the output buffer */ + if (state->x.have) { + if (state->x.have < n) + n = state->x.have; + memcpy(buf, state->x.next, n); + state->x.next += n; + state->x.have -= n; + } + + /* output buffer empty -- return if we're at the end of the input */ + else if (state->eof && state->strm.avail_in == 0) { + state->past = 1; /* tried to read past end */ + break; + } + + /* need output data -- for small len or new stream load up our output + buffer */ + else if (state->how == LOOK || n < (state->size << 1)) { + /* get more output, looking for header if required */ + if (gz_fetch(state) == -1) + return 0; + continue; /* no progress yet -- go back to copy above */ + /* the copy above assures that we will leave with space in the + output buffer, allowing at least one gzungetc() to succeed */ + } + + /* large len -- read directly into user buffer */ + else if (state->how == COPY) { /* read directly */ + if (gz_load(state, (unsigned char *)buf, n, &n) == -1) + return 0; + } + + /* large len -- decompress directly into user buffer */ + else { /* state->how == GZIP */ + state->strm.avail_out = n; + state->strm.next_out = (unsigned char *)buf; + if (gz_decomp(state) == -1) + return 0; + n = state->x.have; + state->x.have = 0; + } + + /* update progress */ + len -= n; + buf = (char *)buf + n; + got += n; + state->x.pos += n; + } while (len); + + /* return number of bytes read into user buffer */ + return got; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzread(file, buf, len) + gzFile file; + voidp buf; + unsigned len; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* since an int is returned, make sure len fits in one, otherwise return + with an error (this avoids a flaw in the interface) */ + if ((int)len < 0) { + gz_error(state, Z_STREAM_ERROR, "request does not fit in an int"); + return -1; + } + + /* read len or fewer bytes to buf */ + len = gz_read(state, buf, len); + + /* check for an error */ + if (len == 0 && state->err != Z_OK && state->err != Z_BUF_ERROR) + return -1; + + /* return the number of bytes read (this is assured to fit in an int) */ + return (int)len; +} + +/* -- see zlib.h -- */ +z_size_t ZEXPORT gzfread(buf, size, nitems, file) + voidp buf; + z_size_t size; + z_size_t nitems; + gzFile file; +{ + z_size_t len; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return 0; + + /* compute bytes to read -- error on overflow */ + len = nitems * size; + if (size && len / size != nitems) { + gz_error(state, Z_STREAM_ERROR, "request does not fit in a size_t"); + return 0; + } + + /* read len 
or fewer bytes to buf, return the number of full items read */ + return len ? gz_read(state, buf, len) / size : 0; +} + +/* -- see zlib.h -- */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +#else +# undef gzgetc +#endif +int ZEXPORT gzgetc(file) + gzFile file; +{ + int ret; + unsigned char buf[1]; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* try output buffer (no need to check for skip request) */ + if (state->x.have) { + state->x.have--; + state->x.pos++; + return *(state->x.next)++; + } + + /* nothing there -- try gz_read() */ + ret = gz_read(state, buf, 1); + return ret < 1 ? -1 : buf[0]; +} + +int ZEXPORT gzgetc_(file) +gzFile file; +{ + return gzgetc(file); +} + +/* -- see zlib.h -- */ +int ZEXPORT gzungetc(c, file) + int c; + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return -1; + } + + /* can't push EOF */ + if (c < 0) + return -1; + + /* if output buffer empty, put byte at end (allows more pushing) */ + if (state->x.have == 0) { + state->x.have = 1; + state->x.next = state->out + (state->size << 1) - 1; + state->x.next[0] = (unsigned char)c; + state->x.pos--; + state->past = 0; + return c; + } + + /* if no room, give up (must have already done a gzungetc()) */ + if (state->x.have == (state->size << 1)) { + gz_error(state, Z_DATA_ERROR, "out of room to push characters"); + return -1; + } + + /* slide output data if needed and insert byte before existing data */ + if (state->x.next == state->out) { + unsigned char *src = state->out + state->x.have; + unsigned char *dest = state->out + (state->size << 1); + while (src > state->out) + *--dest = *--src; + state->x.next = dest; + } + state->x.have++; + state->x.next--; + state->x.next[0] = (unsigned char)c; + state->x.pos--; + state->past = 0; + return c; +} + +/* -- see zlib.h -- */ +char * ZEXPORT gzgets(file, buf, len) + gzFile file; + char *buf; + int len; +{ + unsigned left, n; + char *str; + unsigned char *eol; + gz_statep state; + + /* check parameters and get internal structure */ + if (file == NULL || buf == NULL || len < 1) + return NULL; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return NULL; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return NULL; + } + + /* copy output bytes up to new line or len - 1, whichever comes first -- + append a terminating zero to the string (we don't check for a zero in + the contents, let the user worry about that) */ + str = buf; + left = (unsigned)len - 1; + if (left) do { + /* assure that something is in the output buffer */ + if (state->x.have == 0 && gz_fetch(state) == -1) + return NULL; /* error */ + if (state->x.have == 0) { /* end of file */ + state->past = 1; /* read past end */ + break; /* return what we have */ + } + + /* look for end-of-line in current output buffer */ + n = 
state->x.have > left ? left : state->x.have; + eol = (unsigned char *)memchr(state->x.next, '\n', n); + if (eol != NULL) + n = (unsigned)(eol - state->x.next) + 1; + + /* copy through end-of-line, or remainder if not found */ + memcpy(buf, state->x.next, n); + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + left -= n; + buf += n; + } while (left && eol == NULL); + + /* return terminated string, or if nothing, end of file */ + if (buf == str) + return NULL; + buf[0] = 0; + return str; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzdirect(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + + /* if the state is not known, but we can find out, then do so (this is + mainly for right after a gzopen() or gzdopen()) */ + if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0) + (void)gz_look(state); + + /* return 1 if transparent, 0 if processing a gzip stream */ + return state->direct; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzclose_r(file) + gzFile file; +{ + int ret, err; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + /* check that we're reading */ + if (state->mode != GZ_READ) + return Z_STREAM_ERROR; + + /* free memory and close file */ + if (state->size) { + inflateEnd(&(state->strm)); + free(state->out); + free(state->in); + } + err = state->err == Z_BUF_ERROR ? Z_BUF_ERROR : Z_OK; + gz_error(state, Z_OK, NULL); + free(state->path); + ret = close(state->fd); + free(state); + return ret ? Z_ERRNO : err; +} diff --git a/deps/zlib-1.2.11/src/gzwrite.c b/deps/zlib-1.2.11/src/gzwrite.c new file mode 100644 index 000000000000..c7b5651d70b9 --- /dev/null +++ b/deps/zlib-1.2.11/src/gzwrite.c @@ -0,0 +1,665 @@ +/* gzwrite.c -- zlib functions for writing gzip files + * Copyright (C) 2004-2017 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* Local functions */ +local int gz_init OF((gz_statep)); +local int gz_comp OF((gz_statep, int)); +local int gz_zero OF((gz_statep, z_off64_t)); +local z_size_t gz_write OF((gz_statep, voidpc, z_size_t)); + +/* Initialize state for writing a gzip file. Mark initialization by setting + state->size to non-zero. Return -1 on a memory allocation failure, or 0 on + success. 
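/* Line-oriented reading with the helpers above (gzgets(), gzgetc(),
 * gzungetc(), gzdirect()); it works the same whether the input is
 * gzip-compressed or plain text.  The file name is a placeholder. */
#include <stdio.h>
#include <zlib.h>

static int print_lines(void)
{
    char line[256];
    int c;
    gzFile gz = gzopen("notes.txt.gz", "rb");

    if (gz == NULL)
        return -1;
    printf("transparent input: %d\n", gzdirect(gz));  /* 1 if no gzip header */
    c = gzgetc(gz);                 /* peek at the first byte ... */
    if (c != -1)
        gzungetc(c, gz);            /* ... and push it back for gzgets() */
    while (gzgets(gz, line, sizeof(line)) != NULL)
        fputs(line, stdout);
    return gzclose_r(gz);
}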
*/ +local int gz_init(state) + gz_statep state; +{ + int ret; + z_streamp strm = &(state->strm); + + /* allocate input buffer (double size for gzprintf) */ + state->in = (unsigned char *)malloc(state->want << 1); + if (state->in == NULL) { + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + + /* only need output buffer and deflate state if compressing */ + if (!state->direct) { + /* allocate output buffer */ + state->out = (unsigned char *)malloc(state->want); + if (state->out == NULL) { + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + + /* allocate deflate memory, set up for gzip compression */ + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; + strm->opaque = Z_NULL; + ret = deflateInit2(strm, state->level, Z_DEFLATED, + MAX_WBITS + 16, DEF_MEM_LEVEL, state->strategy); + if (ret != Z_OK) { + free(state->out); + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + strm->next_in = NULL; + } + + /* mark state as initialized */ + state->size = state->want; + + /* initialize write buffer if compressing */ + if (!state->direct) { + strm->avail_out = state->size; + strm->next_out = state->out; + state->x.next = strm->next_out; + } + return 0; +} + +/* Compress whatever is at avail_in and next_in and write to the output file. + Return -1 if there is an error writing to the output file or if gz_init() + fails to allocate memory, otherwise 0. flush is assumed to be a valid + deflate() flush value. If flush is Z_FINISH, then the deflate() state is + reset to start a new gzip stream. If gz->direct is true, then simply write + to the output file without compressing, and ignore flush. */ +local int gz_comp(state, flush) + gz_statep state; + int flush; +{ + int ret, writ; + unsigned have, put, max = ((unsigned)-1 >> 2) + 1; + z_streamp strm = &(state->strm); + + /* allocate memory if this is the first time through */ + if (state->size == 0 && gz_init(state) == -1) + return -1; + + /* write directly if requested */ + if (state->direct) { + while (strm->avail_in) { + put = strm->avail_in > max ? max : strm->avail_in; + writ = write(state->fd, strm->next_in, put); + if (writ < 0) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + strm->avail_in -= (unsigned)writ; + strm->next_in += writ; + } + return 0; + } + + /* run deflate() on provided input until it produces no more output */ + ret = Z_OK; + do { + /* write out current buffer contents if full, or if flushing, but if + doing Z_FINISH then don't write until we get to Z_STREAM_END */ + if (strm->avail_out == 0 || (flush != Z_NO_FLUSH && + (flush != Z_FINISH || ret == Z_STREAM_END))) { + while (strm->next_out > state->x.next) { + put = strm->next_out - state->x.next > (int)max ? max : + (unsigned)(strm->next_out - state->x.next); + writ = write(state->fd, state->x.next, put); + if (writ < 0) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + state->x.next += writ; + } + if (strm->avail_out == 0) { + strm->avail_out = state->size; + strm->next_out = state->out; + state->x.next = state->out; + } + } + + /* compress */ + have = strm->avail_out; + ret = deflate(strm, flush); + if (ret == Z_STREAM_ERROR) { + gz_error(state, Z_STREAM_ERROR, + "internal error: deflate stream corrupt"); + return -1; + } + have -= strm->avail_out; + } while (have); + + /* if that completed a deflate stream, allow another to start */ + if (flush == Z_FINISH) + deflateReset(strm); + + /* all done, no errors */ + return 0; +} + +/* Compress len zeros to output. 
Return -1 on a write error or memory + allocation failure by gz_comp(), or 0 on success. */ +local int gz_zero(state, len) + gz_statep state; + z_off64_t len; +{ + int first; + unsigned n; + z_streamp strm = &(state->strm); + + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return -1; + + /* compress len zeros (len guaranteed > 0) */ + first = 1; + while (len) { + n = GT_OFF(state->size) || (z_off64_t)state->size > len ? + (unsigned)len : state->size; + if (first) { + memset(state->in, 0, n); + first = 0; + } + strm->avail_in = n; + strm->next_in = state->in; + state->x.pos += n; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return -1; + len -= n; + } + return 0; +} + +/* Write len bytes from buf to file. Return the number of bytes written. If + the returned value is less than len, then there was an error. */ +local z_size_t gz_write(state, buf, len) + gz_statep state; + voidpc buf; + z_size_t len; +{ + z_size_t put = len; + + /* if len is zero, avoid unnecessary operations */ + if (len == 0) + return 0; + + /* allocate memory if this is the first time through */ + if (state->size == 0 && gz_init(state) == -1) + return 0; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return 0; + } + + /* for small len, copy to input buffer, otherwise compress directly */ + if (len < state->size) { + /* copy to input buffer, compress when full */ + do { + unsigned have, copy; + + if (state->strm.avail_in == 0) + state->strm.next_in = state->in; + have = (unsigned)((state->strm.next_in + state->strm.avail_in) - + state->in); + copy = state->size - have; + if (copy > len) + copy = len; + memcpy(state->in + have, buf, copy); + state->strm.avail_in += copy; + state->x.pos += copy; + buf = (const char *)buf + copy; + len -= copy; + if (len && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + } while (len); + } + else { + /* consume whatever's left in the input buffer */ + if (state->strm.avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + + /* directly compress user buffer to file */ + state->strm.next_in = (z_const Bytef *)buf; + do { + unsigned n = (unsigned)-1; + if (n > len) + n = len; + state->strm.avail_in = n; + state->x.pos += n; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + len -= n; + } while (len); + } + + /* input was all buffered or compressed */ + return put; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzwrite(file, buf, len) + gzFile file; + voidpc buf; + unsigned len; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* since an int is returned, make sure len fits in one, otherwise return + with an error (this avoids a flaw in the interface) */ + if ((int)len < 0) { + gz_error(state, Z_DATA_ERROR, "requested length does not fit in int"); + return 0; + } + + /* write len bytes from buf (the return value will fit in an int) */ + return (int)gz_write(state, buf, len); +} + +/* -- see zlib.h -- */ +z_size_t ZEXPORT gzfwrite(buf, size, nitems, file) + voidpc buf; + z_size_t size; + z_size_t nitems; + gzFile file; +{ + z_size_t len; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* 
compute bytes to read -- error on overflow */ + len = nitems * size; + if (size && len / size != nitems) { + gz_error(state, Z_STREAM_ERROR, "request does not fit in a size_t"); + return 0; + } + + /* write len bytes to buf, return the number of full items written */ + return len ? gz_write(state, buf, len) / size : 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzputc(file, c) + gzFile file; + int c; +{ + unsigned have; + unsigned char buf[1]; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return -1; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return -1; + } + + /* try writing to input buffer for speed (state->size == 0 if buffer not + initialized) */ + if (state->size) { + if (strm->avail_in == 0) + strm->next_in = state->in; + have = (unsigned)((strm->next_in + strm->avail_in) - state->in); + if (have < state->size) { + state->in[have] = (unsigned char)c; + strm->avail_in++; + state->x.pos++; + return c & 0xff; + } + } + + /* no room in buffer or not initialized, use gz_write() */ + buf[0] = (unsigned char)c; + if (gz_write(state, buf, 1) != 1) + return -1; + return c & 0xff; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzputs(file, str) + gzFile file; + const char *str; +{ + int ret; + z_size_t len; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return -1; + + /* write string */ + len = strlen(str); + ret = gz_write(state, str, len); + return ret == 0 && len != 0 ? 
-1 : ret; +} + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +#include + +/* -- see zlib.h -- */ +int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va) +{ + int len; + unsigned left; + char *next; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* make sure we have some buffer space */ + if (state->size == 0 && gz_init(state) == -1) + return state->err; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return state->err; + } + + /* do the printf() into the input buffer, put length in len -- the input + buffer is double-sized just for this function, so there is guaranteed to + be state->size bytes available after the current contents */ + if (strm->avail_in == 0) + strm->next_in = state->in; + next = (char *)(state->in + (strm->next_in - state->in) + strm->avail_in); + next[state->size - 1] = 0; +#ifdef NO_vsnprintf +# ifdef HAS_vsprintf_void + (void)vsprintf(next, format, va); + for (len = 0; len < state->size; len++) + if (next[len] == 0) break; +# else + len = vsprintf(next, format, va); +# endif +#else +# ifdef HAS_vsnprintf_void + (void)vsnprintf(next, state->size, format, va); + len = strlen(next); +# else + len = vsnprintf(next, state->size, format, va); +# endif +#endif + + /* check that printf() results fit in buffer */ + if (len == 0 || (unsigned)len >= state->size || next[state->size - 1] != 0) + return 0; + + /* update buffer and position, compress first half if past that */ + strm->avail_in += (unsigned)len; + state->x.pos += len; + if (strm->avail_in >= state->size) { + left = strm->avail_in - state->size; + strm->avail_in = state->size; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return state->err; + memcpy(state->in, state->in + state->size, left); + strm->next_in = state->in; + strm->avail_in = left; + } + return len; +} + +int ZEXPORTVA gzprintf(gzFile file, const char *format, ...) 
+{ + va_list va; + int ret; + + va_start(va, format); + ret = gzvprintf(file, format, va); + va_end(va); + return ret; +} + +#else /* !STDC && !Z_HAVE_STDARG_H */ + +/* -- see zlib.h -- */ +int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, + a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) + gzFile file; + const char *format; + int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, + a11, a12, a13, a14, a15, a16, a17, a18, a19, a20; +{ + unsigned len, left; + char *next; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that can really pass pointer in ints */ + if (sizeof(int) != sizeof(void *)) + return Z_STREAM_ERROR; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* make sure we have some buffer space */ + if (state->size == 0 && gz_init(state) == -1) + return state->error; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return state->error; + } + + /* do the printf() into the input buffer, put length in len -- the input + buffer is double-sized just for this function, so there is guaranteed to + be state->size bytes available after the current contents */ + if (strm->avail_in == 0) + strm->next_in = state->in; + next = (char *)(strm->next_in + strm->avail_in); + next[state->size - 1] = 0; +#ifdef NO_snprintf +# ifdef HAS_sprintf_void + sprintf(next, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, + a13, a14, a15, a16, a17, a18, a19, a20); + for (len = 0; len < size; len++) + if (next[len] == 0) + break; +# else + len = sprintf(next, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, + a12, a13, a14, a15, a16, a17, a18, a19, a20); +# endif +#else +# ifdef HAS_snprintf_void + snprintf(next, state->size, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, + a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); + len = strlen(next); +# else + len = snprintf(next, state->size, format, a1, a2, a3, a4, a5, a6, a7, a8, + a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); +# endif +#endif + + /* check that printf() results fit in buffer */ + if (len == 0 || len >= state->size || next[state->size - 1] != 0) + return 0; + + /* update buffer and position, compress first half if past that */ + strm->avail_in += len; + state->x.pos += len; + if (strm->avail_in >= state->size) { + left = strm->avail_in - state->size; + strm->avail_in = state->size; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return state->err; + memcpy(state->in, state->in + state->size, left); + strm->next_in = state->in; + strm->avail_in = left; + } + return (int)len; +} + +#endif + +/* -- see zlib.h -- */ +int ZEXPORT gzflush(file, flush) + gzFile file; + int flush; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* check flush parameter */ + if (flush < 0 || flush > Z_FINISH) + return Z_STREAM_ERROR; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return state->err; + } + + /* compress remaining data with requested flush */ + (void)gz_comp(state, flush); + return state->err; +} + +/* -- see zlib.h -- */ +int ZEXPORT 
gzsetparams(file, level, strategy) + gzFile file; + int level; + int strategy; +{ + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* if no change is requested, then do nothing */ + if (level == state->level && strategy == state->strategy) + return Z_OK; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return state->err; + } + + /* change compression parameters for subsequent input */ + if (state->size) { + /* flush previous input with previous parameters before changing */ + if (strm->avail_in && gz_comp(state, Z_BLOCK) == -1) + return state->err; + deflateParams(strm, level, strategy); + } + state->level = level; + state->strategy = strategy; + return Z_OK; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzclose_w(file) + gzFile file; +{ + int ret = Z_OK; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + /* check that we're writing */ + if (state->mode != GZ_WRITE) + return Z_STREAM_ERROR; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + ret = state->err; + } + + /* flush, free memory, and close file */ + if (gz_comp(state, Z_FINISH) == -1) + ret = state->err; + if (state->size) { + if (!state->direct) { + (void)deflateEnd(&(state->strm)); + free(state->out); + } + free(state->in); + } + gz_error(state, Z_OK, NULL); + free(state->path); + if (close(state->fd) == -1) + ret = Z_ERRNO; + free(state); + return ret; +} diff --git a/deps/zlib-1.2.11/src/infback.c b/deps/zlib-1.2.11/src/infback.c new file mode 100644 index 000000000000..59679ecbfc5d --- /dev/null +++ b/deps/zlib-1.2.11/src/infback.c @@ -0,0 +1,640 @@ +/* infback.c -- inflate using a call-back interface + * Copyright (C) 1995-2016 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + This code is largely copied from inflate.c. Normally either infback.o or + inflate.o would be linked into an application--not both. The interface + with inffast.c is retained so that optimized assembler-coded versions of + inflate_fast() can be used with either inflate.c or infback.c. + */ + +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +/* function prototypes */ +local void fixedtables OF((struct inflate_state FAR *state)); + +/* + strm provides memory allocation functions in zalloc and zfree, or + Z_NULL to use the library memory allocation functions. + + windowBits is in the range 8..15, and window is a user-supplied + window and output buffer that is 2**windowBits bytes. 
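+
+   A minimal setup sketch (the 32K window matches windowBits == 15; error
+   handling is the caller's concern):
+
+     z_stream strm;
+     static unsigned char window[1U << 15];
+
+     strm.zalloc = Z_NULL;
+     strm.zfree = Z_NULL;
+     strm.opaque = Z_NULL;
+     ret = inflateBackInit(&strm, 15, window);
+     -- ret is Z_OK on success; check it before calling inflateBack()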
+ */ +int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size) +z_streamp strm; +int windowBits; +unsigned char FAR *window; +const char *version; +int stream_size; +{ + struct inflate_state FAR *state; + + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != (int)(sizeof(z_stream))) + return Z_VERSION_ERROR; + if (strm == Z_NULL || window == Z_NULL || + windowBits < 8 || windowBits > 15) + return Z_STREAM_ERROR; + strm->msg = Z_NULL; /* in case we return an error */ + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + state = (struct inflate_state FAR *)ZALLOC(strm, 1, + sizeof(struct inflate_state)); + if (state == Z_NULL) return Z_MEM_ERROR; + Tracev((stderr, "inflate: allocated\n")); + strm->state = (struct internal_state FAR *)state; + state->dmax = 32768U; + state->wbits = (uInt)windowBits; + state->wsize = 1U << windowBits; + state->window = window; + state->wnext = 0; + state->whave = 0; + return Z_OK; +} + +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. + */ +local void fixedtables(state) +struct inflate_state FAR *state; +{ +#ifdef BUILDFIXED + static int virgin = 1; + static code *lenfix, *distfix; + static code fixed[544]; + + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + unsigned sym, bits; + static code *next; + + /* literal/length table */ + sym = 0; + while (sym < 144) state->lens[sym++] = 8; + while (sym < 256) state->lens[sym++] = 9; + while (sym < 280) state->lens[sym++] = 7; + while (sym < 288) state->lens[sym++] = 8; + next = fixed; + lenfix = next; + bits = 9; + inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); + + /* distance table */ + sym = 0; + while (sym < 32) state->lens[sym++] = 5; + distfix = next; + bits = 5; + inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); + + /* do this just once */ + virgin = 0; + } +#else /* !BUILDFIXED */ +# include "inffixed.h" +#endif /* BUILDFIXED */ + state->lencode = lenfix; + state->lenbits = 9; + state->distcode = distfix; + state->distbits = 5; +} + +/* Macros for inflateBack(): */ + +/* Load returned state from inflate_fast() */ +#define LOAD() \ + do { \ + put = strm->next_out; \ + left = strm->avail_out; \ + next = strm->next_in; \ + have = strm->avail_in; \ + hold = state->hold; \ + bits = state->bits; \ + } while (0) + +/* Set state from registers for inflate_fast() */ +#define RESTORE() \ + do { \ + strm->next_out = put; \ + strm->avail_out = left; \ + strm->next_in = next; \ + strm->avail_in = have; \ + state->hold = hold; \ + state->bits = bits; \ + } while (0) + +/* Clear the input bit accumulator */ +#define INITBITS() \ + do { \ + hold = 0; \ + bits = 0; \ + } while (0) + +/* Assure that some input is available. 
If input is requested, but denied, + then return a Z_BUF_ERROR from inflateBack(). */ +#define PULL() \ + do { \ + if (have == 0) { \ + have = in(in_desc, &next); \ + if (have == 0) { \ + next = Z_NULL; \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* Get a byte of input into the bit accumulator, or return from inflateBack() + with an error if there is no input available. */ +#define PULLBYTE() \ + do { \ + PULL(); \ + have--; \ + hold += (unsigned long)(*next++) << bits; \ + bits += 8; \ + } while (0) + +/* Assure that there are at least n bits in the bit accumulator. If there is + not enough available input to do that, then return from inflateBack() with + an error. */ +#define NEEDBITS(n) \ + do { \ + while (bits < (unsigned)(n)) \ + PULLBYTE(); \ + } while (0) + +/* Return the low n bits of the bit accumulator (n < 16) */ +#define BITS(n) \ + ((unsigned)hold & ((1U << (n)) - 1)) + +/* Remove n bits from the bit accumulator */ +#define DROPBITS(n) \ + do { \ + hold >>= (n); \ + bits -= (unsigned)(n); \ + } while (0) + +/* Remove zero to seven bits as needed to go to a byte boundary */ +#define BYTEBITS() \ + do { \ + hold >>= bits & 7; \ + bits -= bits & 7; \ + } while (0) + +/* Assure that some output space is available, by writing out the window + if it's full. If the write fails, return from inflateBack() with a + Z_BUF_ERROR. */ +#define ROOM() \ + do { \ + if (left == 0) { \ + put = state->window; \ + left = state->wsize; \ + state->whave = left; \ + if (out(out_desc, put, left)) { \ + ret = Z_BUF_ERROR; \ + goto inf_leave; \ + } \ + } \ + } while (0) + +/* + strm provides the memory allocation functions and window buffer on input, + and provides information on the unused input on return. For Z_DATA_ERROR + returns, strm will also provide an error message. + + in() and out() are the call-back input and output functions. When + inflateBack() needs more input, it calls in(). When inflateBack() has + filled the window with output, or when it completes with data in the + window, it calls out() to write out the data. The application must not + change the provided input until in() is called again or inflateBack() + returns. The application must not change the window/output buffer until + inflateBack() returns. + + in() and out() are called with a descriptor parameter provided in the + inflateBack() call. This parameter can be a structure that provides the + information required to do the read or write, as well as accumulated + information on the input and output such as totals and check values. + + in() should return zero on failure. out() should return non-zero on + failure. If either in() or out() fails, than inflateBack() returns a + Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it + was in() or out() that caused in the error. Otherwise, inflateBack() + returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format + error, or Z_MEM_ERROR if it could not allocate memory for the state. + inflateBack() can also return Z_STREAM_ERROR if the input parameters + are not correct, i.e. strm is Z_NULL or the state was not initialized. 
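+
+   A minimal pair of callbacks over stdio, matching the in_func and out_func
+   typedefs in zlib.h (sketch only; the buffer size is arbitrary and source
+   and dest are FILE pointers supplied by the caller):
+
+     static unsigned char ibuf[16384];
+
+     static unsigned in_cb(void *desc, z_const unsigned char **next)
+     {
+         *next = ibuf;
+         return (unsigned)fread(ibuf, 1, sizeof(ibuf), (FILE *)desc);
+     }
+
+     static int out_cb(void *desc, unsigned char *data, unsigned len)
+     {
+         return fwrite(data, 1, len, (FILE *)desc) != len;
+     }
+
+     ret = inflateBack(&strm, in_cb, (void *)source, out_cb, (void *)dest);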
+ */ +int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc) +z_streamp strm; +in_func in; +void FAR *in_desc; +out_func out; +void FAR *out_desc; +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *next; /* next input */ + unsigned char FAR *put; /* next output */ + unsigned have, left; /* available input and output */ + unsigned long hold; /* bit buffer */ + unsigned bits; /* bits in bit buffer */ + unsigned copy; /* number of stored or match bytes to copy */ + unsigned char FAR *from; /* where to copy match bytes from */ + code here; /* current decoding table entry */ + code last; /* parent table entry */ + unsigned len; /* length to copy for repeats, bits to drop */ + int ret; /* return code */ + static const unsigned short order[19] = /* permutation of code lengths */ + {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + + /* Check that the strm exists and that the state was initialized */ + if (strm == Z_NULL || strm->state == Z_NULL) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* Reset the state */ + strm->msg = Z_NULL; + state->mode = TYPE; + state->last = 0; + state->whave = 0; + next = strm->next_in; + have = next != Z_NULL ? strm->avail_in : 0; + hold = 0; + bits = 0; + put = state->window; + left = state->wsize; + + /* Inflate until end of block marked as last */ + for (;;) + switch (state->mode) { + case TYPE: + /* determine and dispatch block type */ + if (state->last) { + BYTEBITS(); + state->mode = DONE; + break; + } + NEEDBITS(3); + state->last = BITS(1); + DROPBITS(1); + switch (BITS(2)) { + case 0: /* stored block */ + Tracev((stderr, "inflate: stored block%s\n", + state->last ? " (last)" : "")); + state->mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + Tracev((stderr, "inflate: fixed codes block%s\n", + state->last ? " (last)" : "")); + state->mode = LEN; /* decode codes */ + break; + case 2: /* dynamic block */ + Tracev((stderr, "inflate: dynamic codes block%s\n", + state->last ? 
" (last)" : "")); + state->mode = TABLE; + break; + case 3: + strm->msg = (char *)"invalid block type"; + state->mode = BAD; + } + DROPBITS(2); + break; + + case STORED: + /* get and verify stored block length */ + BYTEBITS(); /* go to byte boundary */ + NEEDBITS(32); + if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { + strm->msg = (char *)"invalid stored block lengths"; + state->mode = BAD; + break; + } + state->length = (unsigned)hold & 0xffff; + Tracev((stderr, "inflate: stored length %u\n", + state->length)); + INITBITS(); + + /* copy stored block from input to output */ + while (state->length != 0) { + copy = state->length; + PULL(); + ROOM(); + if (copy > have) copy = have; + if (copy > left) copy = left; + zmemcpy(put, next, copy); + have -= copy; + next += copy; + left -= copy; + put += copy; + state->length -= copy; + } + Tracev((stderr, "inflate: stored end\n")); + state->mode = TYPE; + break; + + case TABLE: + /* get dynamic table entries descriptor */ + NEEDBITS(14); + state->nlen = BITS(5) + 257; + DROPBITS(5); + state->ndist = BITS(5) + 1; + DROPBITS(5); + state->ncode = BITS(4) + 4; + DROPBITS(4); +#ifndef PKZIP_BUG_WORKAROUND + if (state->nlen > 286 || state->ndist > 30) { + strm->msg = (char *)"too many length or distance symbols"; + state->mode = BAD; + break; + } +#endif + Tracev((stderr, "inflate: table sizes ok\n")); + + /* get code length code lengths (not a typo) */ + state->have = 0; + while (state->have < state->ncode) { + NEEDBITS(3); + state->lens[order[state->have++]] = (unsigned short)BITS(3); + DROPBITS(3); + } + while (state->have < 19) + state->lens[order[state->have++]] = 0; + state->next = state->codes; + state->lencode = (code const FAR *)(state->next); + state->lenbits = 7; + ret = inflate_table(CODES, state->lens, 19, &(state->next), + &(state->lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid code lengths set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: code lengths ok\n")); + + /* get length and distance code code lengths */ + state->have = 0; + while (state->have < state->nlen + state->ndist) { + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; + } + else { + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); + if (state->have == 0) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + len = (unsigned)(state->lens[state->have - 1]); + copy = 3 + BITS(2); + DROPBITS(2); + } + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); + len = 0; + copy = 3 + BITS(3); + DROPBITS(3); + } + else { + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); + len = 0; + copy = 11 + BITS(7); + DROPBITS(7); + } + if (state->have + copy > state->nlen + state->ndist) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + while (copy--) + state->lens[state->have++] = (unsigned short)len; + } + } + + /* handle error breaks in while */ + if (state->mode == BAD) break; + + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state->next = 
state->codes; + state->lencode = (code const FAR *)(state->next); + state->lenbits = 9; + ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), + &(state->lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid literal/lengths set"; + state->mode = BAD; + break; + } + state->distcode = (code const FAR *)(state->next); + state->distbits = 6; + ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, + &(state->next), &(state->distbits), state->work); + if (ret) { + strm->msg = (char *)"invalid distances set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: codes ok\n")); + state->mode = LEN; + + case LEN: + /* use inflate_fast() if we have enough input and output */ + if (have >= 6 && left >= 258) { + RESTORE(); + if (state->whave < state->wsize) + state->whave = state->wsize - left; + inflate_fast(strm, state->wsize); + LOAD(); + break; + } + + /* get a literal, length, or end-of-block code */ + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.op && (here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->lencode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + state->length = (unsigned)here.val; + + /* process literal */ + if (here.op == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? + "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + ROOM(); + *put++ = (unsigned char)(state->length); + left--; + state->mode = LEN; + break; + } + + /* process end of block */ + if (here.op & 32) { + Tracevv((stderr, "inflate: end of block\n")); + state->mode = TYPE; + break; + } + + /* invalid code */ + if (here.op & 64) { + strm->msg = (char *)"invalid literal/length code"; + state->mode = BAD; + break; + } + + /* length code -- get extra bits, if any */ + state->extra = (unsigned)(here.op) & 15; + if (state->extra != 0) { + NEEDBITS(state->extra); + state->length += BITS(state->extra); + DROPBITS(state->extra); + } + Tracevv((stderr, "inflate: length %u\n", state->length)); + + /* get distance code */ + for (;;) { + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if ((here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->distcode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + } + DROPBITS(here.bits); + if (here.op & 64) { + strm->msg = (char *)"invalid distance code"; + state->mode = BAD; + break; + } + state->offset = (unsigned)here.val; + + /* get distance extra bits, if any */ + state->extra = (unsigned)(here.op) & 15; + if (state->extra != 0) { + NEEDBITS(state->extra); + state->offset += BITS(state->extra); + DROPBITS(state->extra); + } + if (state->offset > state->wsize - (state->whave < state->wsize ? 
+ left : 0)) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } + Tracevv((stderr, "inflate: distance %u\n", state->offset)); + + /* copy match from window to output */ + do { + ROOM(); + copy = state->wsize - state->offset; + if (copy < left) { + from = put + copy; + copy = left - copy; + } + else { + from = put - state->offset; + copy = left; + } + if (copy > state->length) copy = state->length; + state->length -= copy; + left -= copy; + do { + *put++ = *from++; + } while (--copy); + } while (state->length != 0); + break; + + case DONE: + /* inflate stream terminated properly -- write leftover output */ + ret = Z_STREAM_END; + if (left < state->wsize) { + if (out(out_desc, state->window, state->wsize - left)) + ret = Z_BUF_ERROR; + } + goto inf_leave; + + case BAD: + ret = Z_DATA_ERROR; + goto inf_leave; + + default: /* can't happen, but makes compilers happy */ + ret = Z_STREAM_ERROR; + goto inf_leave; + } + + /* Return unused input */ + inf_leave: + strm->next_in = next; + strm->avail_in = have; + return ret; +} + +int ZEXPORT inflateBackEnd(strm) +z_streamp strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) + return Z_STREAM_ERROR; + ZFREE(strm, strm->state); + strm->state = Z_NULL; + Tracev((stderr, "inflate: end\n")); + return Z_OK; +} diff --git a/deps/zlib-1.2.11/src/inffast.c b/deps/zlib-1.2.11/src/inffast.c new file mode 100644 index 000000000000..0dbd1dbc09f2 --- /dev/null +++ b/deps/zlib-1.2.11/src/inffast.c @@ -0,0 +1,323 @@ +/* inffast.c -- fast decoding + * Copyright (C) 1995-2017 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +#ifdef ASMINF +# pragma message("Assembler code may have bugs -- use at your own risk") +#else + +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. + + Entry assumptions: + + state->mode == LEN + strm->avail_in >= 6 + strm->avail_out >= 258 + start >= strm->avail_out + state->bits < 8 + + On return, state->mode is one of: + + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data + + Notes: + + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. + Therefore if strm->avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. + + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm->avail_out >= 258 for each loop to avoid checking for + output space. 
+ */ +void ZLIB_INTERNAL inflate_fast(strm, start) +z_streamp strm; +unsigned start; /* inflate()'s starting value for strm->avail_out */ +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *in; /* local strm->next_in */ + z_const unsigned char FAR *last; /* have enough input while in < last */ + unsigned char FAR *out; /* local strm->next_out */ + unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ + unsigned char FAR *end; /* while out < end, enough space available */ +#ifdef INFLATE_STRICT + unsigned dmax; /* maximum distance from zlib header */ +#endif + unsigned wsize; /* window size or zero if not using window */ + unsigned whave; /* valid bytes in the window */ + unsigned wnext; /* window write index */ + unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ + unsigned long hold; /* local strm->hold */ + unsigned bits; /* local strm->bits */ + code const FAR *lcode; /* local strm->lencode */ + code const FAR *dcode; /* local strm->distcode */ + unsigned lmask; /* mask for first level of length codes */ + unsigned dmask; /* mask for first level of distance codes */ + code here; /* retrieved table entry */ + unsigned op; /* code bits, operation, extra bits, or */ + /* window position, window bytes to copy */ + unsigned len; /* match length, unused bytes */ + unsigned dist; /* match distance */ + unsigned char FAR *from; /* where to copy match from */ + + /* copy state to local variables */ + state = (struct inflate_state FAR *)strm->state; + in = strm->next_in; + last = in + (strm->avail_in - 5); + out = strm->next_out; + beg = out - (start - strm->avail_out); + end = out + (strm->avail_out - 257); +#ifdef INFLATE_STRICT + dmax = state->dmax; +#endif + wsize = state->wsize; + whave = state->whave; + wnext = state->wnext; + window = state->window; + hold = state->hold; + bits = state->bits; + lcode = state->lencode; + dcode = state->distcode; + lmask = (1U << state->lenbits) - 1; + dmask = (1U << state->distbits) - 1; + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + do { + if (bits < 15) { + hold += (unsigned long)(*in++) << bits; + bits += 8; + hold += (unsigned long)(*in++) << bits; + bits += 8; + } + here = lcode[hold & lmask]; + dolen: + op = (unsigned)(here.bits); + hold >>= op; + bits -= op; + op = (unsigned)(here.op); + if (op == 0) { /* literal */ + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + *out++ = (unsigned char)(here.val); + } + else if (op & 16) { /* length base */ + len = (unsigned)(here.val); + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { + hold += (unsigned long)(*in++) << bits; + bits += 8; + } + len += (unsigned)hold & ((1U << op) - 1); + hold >>= op; + bits -= op; + } + Tracevv((stderr, "inflate: length %u\n", len)); + if (bits < 15) { + hold += (unsigned long)(*in++) << bits; + bits += 8; + hold += (unsigned long)(*in++) << bits; + bits += 8; + } + here = dcode[hold & dmask]; + dodist: + op = (unsigned)(here.bits); + hold >>= op; + bits -= op; + op = (unsigned)(here.op); + if (op & 16) { /* distance base */ + dist = (unsigned)(here.val); + op &= 15; /* number of extra bits */ + if (bits < op) { + hold += (unsigned long)(*in++) << bits; + bits += 8; + if (bits < op) { + hold += (unsigned long)(*in++) << bits; + bits += 8; + } + } + dist += (unsigned)hold & ((1U << op) - 1); +#ifdef INFLATE_STRICT + if (dist > dmax) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#endif + hold >>= op; + bits -= op; + Tracevv((stderr, "inflate: distance %u\n", dist)); + op = (unsigned)(out - beg); /* max distance in output */ + if (dist > op) { /* see if copy from window */ + op = dist - op; /* distance back in window */ + if (op > whave) { + if (state->sane) { + strm->msg = + (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + if (len <= op - whave) { + do { + *out++ = 0; + } while (--len); + continue; + } + len -= op - whave; + do { + *out++ = 0; + } while (--op > whave); + if (op == 0) { + from = out - dist; + do { + *out++ = *from++; + } while (--len); + continue; + } +#endif + } + from = window; + if (wnext == 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { + *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } + } + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; + if (op < len) { /* some from end of window */ + len -= op; + do { + *out++ = *from++; + } while (--op); + from = window; + if (wnext < len) { /* some from start of window */ + op = wnext; + len -= op; + do { + *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } + } + } + else { /* contiguous in window */ + from += wnext - op; + if (op < len) { /* some from window */ + len -= op; + do { + *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } + } + while (len > 2) { + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; + len -= 3; + } + if (len) { + *out++ = *from++; + if (len > 1) + *out++ = *from++; + } + } + else { + from = out - dist; /* copy direct from output */ + do { /* minimum length is three */ + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; + len -= 3; + } while (len > 2); + if (len) { + *out++ = *from++; + if (len > 1) + *out++ = *from++; + } + } + } + else if ((op & 64) == 0) { /* 2nd level distance code */ + here = dcode[here.val + (hold & ((1U << op) - 1))]; + goto dodist; + } + else { + strm->msg = (char *)"invalid distance code"; + state->mode = BAD; + break; + } + } + else if ((op & 64) == 0) { /* 2nd level length code */ + here = lcode[here.val + (hold & ((1U << op) - 1))]; + goto dolen; + } + else if (op & 32) { /* end-of-block */ + Tracevv((stderr, "inflate: end of 
block\n")); + state->mode = TYPE; + break; + } + else { + strm->msg = (char *)"invalid literal/length code"; + state->mode = BAD; + break; + } + } while (in < last && out < end); + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + len = bits >> 3; + in -= len; + bits -= len << 3; + hold &= (1U << bits) - 1; + + /* update state and return */ + strm->next_in = in; + strm->next_out = out; + strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); + strm->avail_out = (unsigned)(out < end ? + 257 + (end - out) : 257 - (out - end)); + state->hold = hold; + state->bits = bits; + return; +} + +/* + inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): + - Using bit fields for code structure + - Different op definition to avoid & for extra bits (do & for table bits) + - Three separate decoding do-loops for direct, window, and wnext == 0 + - Special case for distance > 1 copies to do overlapped load and store copy + - Explicit branch predictions (based on measured branch probabilities) + - Deferring match copy and interspersed it with decoding subsequent codes + - Swapping literal/length else + - Swapping window/direct else + - Larger unrolled copy loops (three is about right) + - Moving len -= 3 statement into middle of loop + */ + +#endif /* !ASMINF */ diff --git a/deps/zlib-1.2.11/src/inffast.h b/deps/zlib-1.2.11/src/inffast.h new file mode 100644 index 000000000000..e5c1aa4ca8cd --- /dev/null +++ b/deps/zlib-1.2.11/src/inffast.h @@ -0,0 +1,11 @@ +/* inffast.h -- header to use inffast.c + * Copyright (C) 1995-2003, 2010 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start)); diff --git a/deps/zlib-1.2.11/src/inffixed.h b/deps/zlib-1.2.11/src/inffixed.h new file mode 100644 index 000000000000..d62832776948 --- /dev/null +++ b/deps/zlib-1.2.11/src/inffixed.h @@ -0,0 +1,94 @@ + /* inffixed.h -- table for decoding fixed codes + * Generated automatically by makefixed(). + */ + + /* WARNING: this file should *not* be used by applications. + It is part of the implementation of this library and is + subject to change. Applications should only use zlib.h. 
+ */ + + static const code lenfix[512] = { + {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48}, + {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128}, + {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59}, + {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176}, + {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20}, + {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100}, + {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8}, + {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216}, + {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76}, + {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114}, + {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2}, + {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148}, + {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42}, + {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86}, + {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15}, + {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236}, + {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62}, + {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, + {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31}, + {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162}, + {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25}, + {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105}, + {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4}, + {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202}, + {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69}, + {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125}, + {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13}, + {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195}, + {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35}, + {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91}, + {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19}, + {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246}, + {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55}, + {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135}, + {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99}, + {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190}, + {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16}, + {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96}, + {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6}, + {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209}, + {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72}, + {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116}, + {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4}, + {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153}, + {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44}, + {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82}, + {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11}, + {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, + {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58}, + {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138}, + 
{0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51}, + {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173}, + {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30}, + {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110}, + {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0}, + {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195}, + {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65}, + {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121}, + {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9}, + {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258}, + {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37}, + {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93}, + {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23}, + {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251}, + {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51}, + {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, + {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67}, + {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183}, + {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23}, + {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103}, + {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9}, + {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223}, + {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79}, + {0,9,255} + }; + + static const code distfix[32] = { + {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025}, + {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193}, + {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385}, + {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577}, + {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, + {22,5,193},{64,5,0} + }; diff --git a/deps/zlib-1.2.11/src/inflate.c b/deps/zlib-1.2.11/src/inflate.c new file mode 100644 index 000000000000..ac333e8c2eda --- /dev/null +++ b/deps/zlib-1.2.11/src/inflate.c @@ -0,0 +1,1561 @@ +/* inflate.c -- zlib decompression + * Copyright (C) 1995-2016 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * Change history: + * + * 1.2.beta0 24 Nov 2002 + * - First version -- complete rewrite of inflate to simplify code, avoid + * creation of window when not needed, minimize use of window when it is + * needed, make inffast.c even faster, implement gzip decoding, and to + * improve code readability and style over the previous zlib inflate code + * + * 1.2.beta1 25 Nov 2002 + * - Use pointers for available input and output checking in inffast.c + * - Remove input and output counters in inffast.c + * - Change inffast.c entry and loop from avail_in >= 7 to >= 6 + * - Remove unnecessary second byte pull from length extra in inffast.c + * - Unroll direct copy to three copies per loop in inffast.c + * + * 1.2.beta2 4 Dec 2002 + * - Change external routine names to reduce potential conflicts + * - Correct filename to inffixed.h for fixed tables in inflate.c + * - Make hbuf[] unsigned char to match parameter type in inflate.c + * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset) + * to avoid negation problem on Alphas (64 bit) in inflate.c + * + * 1.2.beta3 22 Dec 2002 + * - Add comments on state->bits assertion in inffast.c + * - Add 
comments on op field in inftrees.h + * - Fix bug in reuse of allocated window after inflateReset() + * - Remove bit fields--back to byte structure for speed + * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths + * - Change post-increments to pre-increments in inflate_fast(), PPC biased? + * - Add compile time option, POSTINC, to use post-increments instead (Intel?) + * - Make MATCH copy in inflate() much faster for when inflate_fast() not used + * - Use local copies of stream next and avail values, as well as local bit + * buffer and bit count in inflate()--for speed when inflate_fast() not used + * + * 1.2.beta4 1 Jan 2003 + * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings + * - Move a comment on output buffer sizes from inffast.c to inflate.c + * - Add comments in inffast.c to introduce the inflate_fast() routine + * - Rearrange window copies in inflate_fast() for speed and simplification + * - Unroll last copy for window match in inflate_fast() + * - Use local copies of window variables in inflate_fast() for speed + * - Pull out common wnext == 0 case for speed in inflate_fast() + * - Make op and len in inflate_fast() unsigned for consistency + * - Add FAR to lcode and dcode declarations in inflate_fast() + * - Simplified bad distance check in inflate_fast() + * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new + * source file infback.c to provide a call-back interface to inflate for + * programs like gzip and unzip -- uses window as output buffer to avoid + * window copying + * + * 1.2.beta5 1 Jan 2003 + * - Improved inflateBack() interface to allow the caller to provide initial + * input in strm. + * - Fixed stored blocks bug in inflateBack() + * + * 1.2.beta6 4 Jan 2003 + * - Added comments in inffast.c on effectiveness of POSTINC + * - Typecasting all around to reduce compiler warnings + * - Changed loops from while (1) or do {} while (1) to for (;;), again to + * make compilers happy + * - Changed type of window in inflateBackInit() to unsigned char * + * + * 1.2.beta7 27 Jan 2003 + * - Changed many types to unsigned or unsigned short to avoid warnings + * - Added inflateCopy() function + * + * 1.2.0 9 Mar 2003 + * - Changed inflateBack() interface to provide separate opaque descriptors + * for the in() and out() functions + * - Changed inflateBack() argument and in_func typedef to swap the length + * and buffer address return values for the input function + * - Check next_in and next_out for Z_NULL on entry to inflate() + * + * The history for versions after 1.2.0 are in ChangeLog in zlib distribution. 
+ */ + +#include "zutil.h" +#include "inftrees.h" +#include "inflate.h" +#include "inffast.h" + +#ifdef MAKEFIXED +# ifndef BUILDFIXED +# define BUILDFIXED +# endif +#endif + +/* function prototypes */ +local int inflateStateCheck OF((z_streamp strm)); +local void fixedtables OF((struct inflate_state FAR *state)); +local int updatewindow OF((z_streamp strm, const unsigned char FAR *end, + unsigned copy)); +#ifdef BUILDFIXED + void makefixed OF((void)); +#endif +local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf, + unsigned len)); + +local int inflateStateCheck(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + if (strm == Z_NULL || + strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) + return 1; + state = (struct inflate_state FAR *)strm->state; + if (state == Z_NULL || state->strm != strm || + state->mode < HEAD || state->mode > SYNC) + return 1; + return 0; +} + +int ZEXPORT inflateResetKeep(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + strm->total_in = strm->total_out = state->total = 0; + strm->msg = Z_NULL; + if (state->wrap) /* to support ill-conceived Java test suite */ + strm->adler = state->wrap & 1; + state->mode = HEAD; + state->last = 0; + state->havedict = 0; + state->dmax = 32768U; + state->head = Z_NULL; + state->hold = 0; + state->bits = 0; + state->lencode = state->distcode = state->next = state->codes; + state->sane = 1; + state->back = -1; + Tracev((stderr, "inflate: reset\n")); + return Z_OK; +} + +int ZEXPORT inflateReset(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + state->wsize = 0; + state->whave = 0; + state->wnext = 0; + return inflateResetKeep(strm); +} + +int ZEXPORT inflateReset2(strm, windowBits) +z_streamp strm; +int windowBits; +{ + int wrap; + struct inflate_state FAR *state; + + /* get the state */ + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 5; +#ifdef GUNZIP + if (windowBits < 48) + windowBits &= 15; +#endif + } + + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) + return Z_STREAM_ERROR; + if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) { + ZFREE(strm, state->window); + state->window = Z_NULL; + } + + /* update state and reset the rest of it */ + state->wrap = wrap; + state->wbits = (unsigned)windowBits; + return inflateReset(strm); +} + +int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) +z_streamp strm; +int windowBits; +const char *version; +int stream_size; +{ + int ret; + struct inflate_state FAR *state; + + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != (int)(sizeof(z_stream))) + return Z_VERSION_ERROR; + if (strm == Z_NULL) return Z_STREAM_ERROR; + strm->msg = Z_NULL; /* in case we return an error */ + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + state = (struct inflate_state FAR *) + 
ZALLOC(strm, 1, sizeof(struct inflate_state)); + if (state == Z_NULL) return Z_MEM_ERROR; + Tracev((stderr, "inflate: allocated\n")); + strm->state = (struct internal_state FAR *)state; + state->strm = strm; + state->window = Z_NULL; + state->mode = HEAD; /* to pass state test in inflateReset2() */ + ret = inflateReset2(strm, windowBits); + if (ret != Z_OK) { + ZFREE(strm, state); + strm->state = Z_NULL; + } + return ret; +} + +int ZEXPORT inflateInit_(strm, version, stream_size) +z_streamp strm; +const char *version; +int stream_size; +{ + return inflateInit2_(strm, DEF_WBITS, version, stream_size); +} + +int ZEXPORT inflatePrime(strm, bits, value) +z_streamp strm; +int bits; +int value; +{ + struct inflate_state FAR *state; + + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (bits < 0) { + state->hold = 0; + state->bits = 0; + return Z_OK; + } + if (bits > 16 || state->bits + (uInt)bits > 32) return Z_STREAM_ERROR; + value &= (1L << bits) - 1; + state->hold += (unsigned)value << state->bits; + state->bits += (uInt)bits; + return Z_OK; +} + +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. + */ +local void fixedtables(state) +struct inflate_state FAR *state; +{ +#ifdef BUILDFIXED + static int virgin = 1; + static code *lenfix, *distfix; + static code fixed[544]; + + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + unsigned sym, bits; + static code *next; + + /* literal/length table */ + sym = 0; + while (sym < 144) state->lens[sym++] = 8; + while (sym < 256) state->lens[sym++] = 9; + while (sym < 280) state->lens[sym++] = 7; + while (sym < 288) state->lens[sym++] = 8; + next = fixed; + lenfix = next; + bits = 9; + inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); + + /* distance table */ + sym = 0; + while (sym < 32) state->lens[sym++] = 5; + distfix = next; + bits = 5; + inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); + + /* do this just once */ + virgin = 0; + } +#else /* !BUILDFIXED */ +# include "inffixed.h" +#endif /* BUILDFIXED */ + state->lencode = lenfix; + state->lenbits = 9; + state->distcode = distfix; + state->distbits = 5; +} + +#ifdef MAKEFIXED +#include + +/* + Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also + defines BUILDFIXED, so the tables are built on the fly. makefixed() writes + those tables to stdout, which would be piped to inffixed.h. 
A small program + can simply call makefixed to do this: + + void makefixed(void); + + int main(void) + { + makefixed(); + return 0; + } + + Then that can be linked with zlib built with MAKEFIXED defined and run: + + a.out > inffixed.h + */ +void makefixed() +{ + unsigned low, size; + struct inflate_state state; + + fixedtables(&state); + puts(" /* inffixed.h -- table for decoding fixed codes"); + puts(" * Generated automatically by makefixed()."); + puts(" */"); + puts(""); + puts(" /* WARNING: this file should *not* be used by applications."); + puts(" It is part of the implementation of this library and is"); + puts(" subject to change. Applications should only use zlib.h."); + puts(" */"); + puts(""); + size = 1U << 9; + printf(" static const code lenfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 7) == 0) printf("\n "); + printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op, + state.lencode[low].bits, state.lencode[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); + size = 1U << 5; + printf("\n static const code distfix[%u] = {", size); + low = 0; + for (;;) { + if ((low % 6) == 0) printf("\n "); + printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits, + state.distcode[low].val); + if (++low == size) break; + putchar(','); + } + puts("\n };"); +} +#endif /* MAKEFIXED */ + +/* + Update the window with the last wsize (normally 32K) bytes written before + returning. If window does not exist yet, create it. This is only called + when a window is already in use, or when output has been written during this + inflate call, but the end of the deflate stream has not been reached yet. + It is also called to create a window for dictionary data when a dictionary + is loaded. + + Providing output buffers larger than 32K to inflate() should provide a speed + advantage, since only the last 32K of output is copied to the sliding window + upon return from inflate(), and since all distances after the first 32K of + output will fall in the output data, making match copies simpler and faster. + The advantage may be dependent on the size of the processor's data caches. 
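+
+   Caller-side illustration of that advice (sketch only; "dest" and the code
+   that fills strm.avail_in/next_in around this loop are assumed to exist):
+
+     static unsigned char out[65536];        -- twice the 32K window
+
+     do {
+         strm.avail_out = sizeof(out);
+         strm.next_out = out;
+         ret = inflate(&strm, Z_NO_FLUSH);
+         fwrite(out, 1, sizeof(out) - strm.avail_out, dest);
+     } while (strm.avail_out == 0);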
+ */ +local int updatewindow(strm, end, copy) +z_streamp strm; +const Bytef *end; +unsigned copy; +{ + struct inflate_state FAR *state; + unsigned dist; + + state = (struct inflate_state FAR *)strm->state; + + /* if it hasn't been done already, allocate space for the window */ + if (state->window == Z_NULL) { + state->window = (unsigned char FAR *) + ZALLOC(strm, 1U << state->wbits, + sizeof(unsigned char)); + if (state->window == Z_NULL) return 1; + } + + /* if window not in use yet, initialize */ + if (state->wsize == 0) { + state->wsize = 1U << state->wbits; + state->wnext = 0; + state->whave = 0; + } + + /* copy state->wsize or less output bytes into the circular window */ + if (copy >= state->wsize) { + zmemcpy(state->window, end - state->wsize, state->wsize); + state->wnext = 0; + state->whave = state->wsize; + } + else { + dist = state->wsize - state->wnext; + if (dist > copy) dist = copy; + zmemcpy(state->window + state->wnext, end - copy, dist); + copy -= dist; + if (copy) { + zmemcpy(state->window, end - copy, copy); + state->wnext = copy; + state->whave = state->wsize; + } + else { + state->wnext += dist; + if (state->wnext == state->wsize) state->wnext = 0; + if (state->whave < state->wsize) state->whave += dist; + } + } + return 0; +} + +/* Macros for inflate(): */ + +/* check function to use adler32() for zlib or crc32() for gzip */ +#ifdef GUNZIP +# define UPDATE(check, buf, len) \ + (state->flags ? crc32(check, buf, len) : adler32(check, buf, len)) +#else +# define UPDATE(check, buf, len) adler32(check, buf, len) +#endif + +/* check macros for header crc */ +#ifdef GUNZIP +# define CRC2(check, word) \ + do { \ + hbuf[0] = (unsigned char)(word); \ + hbuf[1] = (unsigned char)((word) >> 8); \ + check = crc32(check, hbuf, 2); \ + } while (0) + +# define CRC4(check, word) \ + do { \ + hbuf[0] = (unsigned char)(word); \ + hbuf[1] = (unsigned char)((word) >> 8); \ + hbuf[2] = (unsigned char)((word) >> 16); \ + hbuf[3] = (unsigned char)((word) >> 24); \ + check = crc32(check, hbuf, 4); \ + } while (0) +#endif + +/* Load registers with state in inflate() for speed */ +#define LOAD() \ + do { \ + put = strm->next_out; \ + left = strm->avail_out; \ + next = strm->next_in; \ + have = strm->avail_in; \ + hold = state->hold; \ + bits = state->bits; \ + } while (0) + +/* Restore state from registers in inflate() */ +#define RESTORE() \ + do { \ + strm->next_out = put; \ + strm->avail_out = left; \ + strm->next_in = next; \ + strm->avail_in = have; \ + state->hold = hold; \ + state->bits = bits; \ + } while (0) + +/* Clear the input bit accumulator */ +#define INITBITS() \ + do { \ + hold = 0; \ + bits = 0; \ + } while (0) + +/* Get a byte of input into the bit accumulator, or return from inflate() + if there is no input available. */ +#define PULLBYTE() \ + do { \ + if (have == 0) goto inf_leave; \ + have--; \ + hold += (unsigned long)(*next++) << bits; \ + bits += 8; \ + } while (0) + +/* Assure that there are at least n bits in the bit accumulator. If there is + not enough available input to do that, then return from inflate(). 
*/ +#define NEEDBITS(n) \ + do { \ + while (bits < (unsigned)(n)) \ + PULLBYTE(); \ + } while (0) + +/* Return the low n bits of the bit accumulator (n < 16) */ +#define BITS(n) \ + ((unsigned)hold & ((1U << (n)) - 1)) + +/* Remove n bits from the bit accumulator */ +#define DROPBITS(n) \ + do { \ + hold >>= (n); \ + bits -= (unsigned)(n); \ + } while (0) + +/* Remove zero to seven bits as needed to go to a byte boundary */ +#define BYTEBITS() \ + do { \ + hold >>= bits & 7; \ + bits -= bits & 7; \ + } while (0) + +/* + inflate() uses a state machine to process as much input data and generate as + much output data as possible before returning. The state machine is + structured roughly as follows: + + for (;;) switch (state) { + ... + case STATEn: + if (not enough input data or output space to make progress) + return; + ... make progress ... + state = STATEm; + break; + ... + } + + so when inflate() is called again, the same case is attempted again, and + if the appropriate resources are provided, the machine proceeds to the + next state. The NEEDBITS() macro is usually the way the state evaluates + whether it can proceed or should return. NEEDBITS() does the return if + the requested bits are not available. The typical use of the BITS macros + is: + + NEEDBITS(n); + ... do something with BITS(n) ... + DROPBITS(n); + + where NEEDBITS(n) either returns from inflate() if there isn't enough + input left to load n bits into the accumulator, or it continues. BITS(n) + gives the low n bits in the accumulator. When done, DROPBITS(n) drops + the low n bits off the accumulator. INITBITS() clears the accumulator + and sets the number of available bits to zero. BYTEBITS() discards just + enough bits to put the accumulator on a byte boundary. After BYTEBITS() + and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. + + NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return + if there is no input available. The decoding of variable length codes uses + PULLBYTE() directly in order to pull just enough bytes to decode the next + code, and no more. + + Some states loop until they get enough input, making sure that enough + state information is maintained to continue the loop where it left off + if NEEDBITS() returns in the loop. For example, want, need, and keep + would all have to actually be part of the saved state in case NEEDBITS() + returns: + + case STATEw: + while (want < need) { + NEEDBITS(n); + keep[want++] = BITS(n); + DROPBITS(n); + } + state = STATEx; + case STATEx: + + As shown above, if the next state is also the next case, then the break + is omitted. + + A state may also return if there is not enough output space available to + complete that state. Those states are copying stored data, writing a + literal byte, and copying a matching string. + + When returning, a "goto inf_leave" is used to update the total counters, + update the check value, and determine whether any progress has been made + during that inflate() call in order to return the proper return code. + Progress is defined as a change in either strm->avail_in or strm->avail_out. + When there is a window, goto inf_leave will update the window with the last + output written. If a goto inf_leave occurs in the middle of decompression + and there is no window currently, goto inf_leave will create one and copy + output to the window for the next call of inflate(). + + In this implementation, the flush parameter of inflate() only affects the + return code (per zlib.h). 
inflate() always writes as much as possible to + strm->next_out, given the space available and the provided input--the effect + documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers + the allocation of and copying into a sliding window until necessary, which + provides the effect documented in zlib.h for Z_FINISH when the entire input + stream available. So the only thing the flush parameter actually does is: + when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it + will return Z_BUF_ERROR if it has not reached the end of the stream. + */ + +int ZEXPORT inflate(strm, flush) +z_streamp strm; +int flush; +{ + struct inflate_state FAR *state; + z_const unsigned char FAR *next; /* next input */ + unsigned char FAR *put; /* next output */ + unsigned have, left; /* available input and output */ + unsigned long hold; /* bit buffer */ + unsigned bits; /* bits in bit buffer */ + unsigned in, out; /* save starting available input and output */ + unsigned copy; /* number of stored or match bytes to copy */ + unsigned char FAR *from; /* where to copy match bytes from */ + code here; /* current decoding table entry */ + code last; /* parent table entry */ + unsigned len; /* length to copy for repeats, bits to drop */ + int ret; /* return code */ +#ifdef GUNZIP + unsigned char hbuf[4]; /* buffer for gzip header crc calculation */ +#endif + static const unsigned short order[19] = /* permutation of code lengths */ + {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; + + if (inflateStateCheck(strm) || strm->next_out == Z_NULL || + (strm->next_in == Z_NULL && strm->avail_in != 0)) + return Z_STREAM_ERROR; + + state = (struct inflate_state FAR *)strm->state; + if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */ + LOAD(); + in = have; + out = left; + ret = Z_OK; + for (;;) + switch (state->mode) { + case HEAD: + if (state->wrap == 0) { + state->mode = TYPEDO; + break; + } + NEEDBITS(16); +#ifdef GUNZIP + if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */ + if (state->wbits == 0) + state->wbits = 15; + state->check = crc32(0L, Z_NULL, 0); + CRC2(state->check, hold); + INITBITS(); + state->mode = FLAGS; + break; + } + state->flags = 0; /* expect zlib header */ + if (state->head != Z_NULL) + state->head->done = -1; + if (!(state->wrap & 1) || /* check if zlib header allowed */ +#else + if ( +#endif + ((BITS(8) << 8) + (hold >> 8)) % 31) { + strm->msg = (char *)"incorrect header check"; + state->mode = BAD; + break; + } + if (BITS(4) != Z_DEFLATED) { + strm->msg = (char *)"unknown compression method"; + state->mode = BAD; + break; + } + DROPBITS(4); + len = BITS(4) + 8; + if (state->wbits == 0) + state->wbits = len; + if (len > 15 || len > state->wbits) { + strm->msg = (char *)"invalid window size"; + state->mode = BAD; + break; + } + state->dmax = 1U << len; + Tracev((stderr, "inflate: zlib header ok\n")); + strm->adler = state->check = adler32(0L, Z_NULL, 0); + state->mode = hold & 0x200 ? 
DICTID : TYPE; + INITBITS(); + break; +#ifdef GUNZIP + case FLAGS: + NEEDBITS(16); + state->flags = (int)(hold); + if ((state->flags & 0xff) != Z_DEFLATED) { + strm->msg = (char *)"unknown compression method"; + state->mode = BAD; + break; + } + if (state->flags & 0xe000) { + strm->msg = (char *)"unknown header flags set"; + state->mode = BAD; + break; + } + if (state->head != Z_NULL) + state->head->text = (int)((hold >> 8) & 1); + if ((state->flags & 0x0200) && (state->wrap & 4)) + CRC2(state->check, hold); + INITBITS(); + state->mode = TIME; + case TIME: + NEEDBITS(32); + if (state->head != Z_NULL) + state->head->time = hold; + if ((state->flags & 0x0200) && (state->wrap & 4)) + CRC4(state->check, hold); + INITBITS(); + state->mode = OS; + case OS: + NEEDBITS(16); + if (state->head != Z_NULL) { + state->head->xflags = (int)(hold & 0xff); + state->head->os = (int)(hold >> 8); + } + if ((state->flags & 0x0200) && (state->wrap & 4)) + CRC2(state->check, hold); + INITBITS(); + state->mode = EXLEN; + case EXLEN: + if (state->flags & 0x0400) { + NEEDBITS(16); + state->length = (unsigned)(hold); + if (state->head != Z_NULL) + state->head->extra_len = (unsigned)hold; + if ((state->flags & 0x0200) && (state->wrap & 4)) + CRC2(state->check, hold); + INITBITS(); + } + else if (state->head != Z_NULL) + state->head->extra = Z_NULL; + state->mode = EXTRA; + case EXTRA: + if (state->flags & 0x0400) { + copy = state->length; + if (copy > have) copy = have; + if (copy) { + if (state->head != Z_NULL && + state->head->extra != Z_NULL) { + len = state->head->extra_len - state->length; + zmemcpy(state->head->extra + len, next, + len + copy > state->head->extra_max ? + state->head->extra_max - len : copy); + } + if ((state->flags & 0x0200) && (state->wrap & 4)) + state->check = crc32(state->check, next, copy); + have -= copy; + next += copy; + state->length -= copy; + } + if (state->length) goto inf_leave; + } + state->length = 0; + state->mode = NAME; + case NAME: + if (state->flags & 0x0800) { + if (have == 0) goto inf_leave; + copy = 0; + do { + len = (unsigned)(next[copy++]); + if (state->head != Z_NULL && + state->head->name != Z_NULL && + state->length < state->head->name_max) + state->head->name[state->length++] = (Bytef)len; + } while (len && copy < have); + if ((state->flags & 0x0200) && (state->wrap & 4)) + state->check = crc32(state->check, next, copy); + have -= copy; + next += copy; + if (len) goto inf_leave; + } + else if (state->head != Z_NULL) + state->head->name = Z_NULL; + state->length = 0; + state->mode = COMMENT; + case COMMENT: + if (state->flags & 0x1000) { + if (have == 0) goto inf_leave; + copy = 0; + do { + len = (unsigned)(next[copy++]); + if (state->head != Z_NULL && + state->head->comment != Z_NULL && + state->length < state->head->comm_max) + state->head->comment[state->length++] = (Bytef)len; + } while (len && copy < have); + if ((state->flags & 0x0200) && (state->wrap & 4)) + state->check = crc32(state->check, next, copy); + have -= copy; + next += copy; + if (len) goto inf_leave; + } + else if (state->head != Z_NULL) + state->head->comment = Z_NULL; + state->mode = HCRC; + case HCRC: + if (state->flags & 0x0200) { + NEEDBITS(16); + if ((state->wrap & 4) && hold != (state->check & 0xffff)) { + strm->msg = (char *)"header crc mismatch"; + state->mode = BAD; + break; + } + INITBITS(); + } + if (state->head != Z_NULL) { + state->head->hcrc = (int)((state->flags >> 9) & 1); + state->head->done = 1; + } + strm->adler = state->check = crc32(0L, Z_NULL, 0); + state->mode = TYPE; + 
break; +#endif + case DICTID: + NEEDBITS(32); + strm->adler = state->check = ZSWAP32(hold); + INITBITS(); + state->mode = DICT; + case DICT: + if (state->havedict == 0) { + RESTORE(); + return Z_NEED_DICT; + } + strm->adler = state->check = adler32(0L, Z_NULL, 0); + state->mode = TYPE; + case TYPE: + if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave; + case TYPEDO: + if (state->last) { + BYTEBITS(); + state->mode = CHECK; + break; + } + NEEDBITS(3); + state->last = BITS(1); + DROPBITS(1); + switch (BITS(2)) { + case 0: /* stored block */ + Tracev((stderr, "inflate: stored block%s\n", + state->last ? " (last)" : "")); + state->mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + Tracev((stderr, "inflate: fixed codes block%s\n", + state->last ? " (last)" : "")); + state->mode = LEN_; /* decode codes */ + if (flush == Z_TREES) { + DROPBITS(2); + goto inf_leave; + } + break; + case 2: /* dynamic block */ + Tracev((stderr, "inflate: dynamic codes block%s\n", + state->last ? " (last)" : "")); + state->mode = TABLE; + break; + case 3: + strm->msg = (char *)"invalid block type"; + state->mode = BAD; + } + DROPBITS(2); + break; + case STORED: + BYTEBITS(); /* go to byte boundary */ + NEEDBITS(32); + if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { + strm->msg = (char *)"invalid stored block lengths"; + state->mode = BAD; + break; + } + state->length = (unsigned)hold & 0xffff; + Tracev((stderr, "inflate: stored length %u\n", + state->length)); + INITBITS(); + state->mode = COPY_; + if (flush == Z_TREES) goto inf_leave; + case COPY_: + state->mode = COPY; + case COPY: + copy = state->length; + if (copy) { + if (copy > have) copy = have; + if (copy > left) copy = left; + if (copy == 0) goto inf_leave; + zmemcpy(put, next, copy); + have -= copy; + next += copy; + left -= copy; + put += copy; + state->length -= copy; + break; + } + Tracev((stderr, "inflate: stored end\n")); + state->mode = TYPE; + break; + case TABLE: + NEEDBITS(14); + state->nlen = BITS(5) + 257; + DROPBITS(5); + state->ndist = BITS(5) + 1; + DROPBITS(5); + state->ncode = BITS(4) + 4; + DROPBITS(4); +#ifndef PKZIP_BUG_WORKAROUND + if (state->nlen > 286 || state->ndist > 30) { + strm->msg = (char *)"too many length or distance symbols"; + state->mode = BAD; + break; + } +#endif + Tracev((stderr, "inflate: table sizes ok\n")); + state->have = 0; + state->mode = LENLENS; + case LENLENS: + while (state->have < state->ncode) { + NEEDBITS(3); + state->lens[order[state->have++]] = (unsigned short)BITS(3); + DROPBITS(3); + } + while (state->have < 19) + state->lens[order[state->have++]] = 0; + state->next = state->codes; + state->lencode = (const code FAR *)(state->next); + state->lenbits = 7; + ret = inflate_table(CODES, state->lens, 19, &(state->next), + &(state->lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid code lengths set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: code lengths ok\n")); + state->have = 0; + state->mode = CODELENS; + case CODELENS: + while (state->have < state->nlen + state->ndist) { + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; + } + else { + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); + if (state->have == 0) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + len = state->lens[state->have - 1]; + copy = 3 + BITS(2); + 
DROPBITS(2); + } + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); + len = 0; + copy = 3 + BITS(3); + DROPBITS(3); + } + else { + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); + len = 0; + copy = 11 + BITS(7); + DROPBITS(7); + } + if (state->have + copy > state->nlen + state->ndist) { + strm->msg = (char *)"invalid bit length repeat"; + state->mode = BAD; + break; + } + while (copy--) + state->lens[state->have++] = (unsigned short)len; + } + } + + /* handle error breaks in while */ + if (state->mode == BAD) break; + + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state->next = state->codes; + state->lencode = (const code FAR *)(state->next); + state->lenbits = 9; + ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), + &(state->lenbits), state->work); + if (ret) { + strm->msg = (char *)"invalid literal/lengths set"; + state->mode = BAD; + break; + } + state->distcode = (const code FAR *)(state->next); + state->distbits = 6; + ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, + &(state->next), &(state->distbits), state->work); + if (ret) { + strm->msg = (char *)"invalid distances set"; + state->mode = BAD; + break; + } + Tracev((stderr, "inflate: codes ok\n")); + state->mode = LEN_; + if (flush == Z_TREES) goto inf_leave; + case LEN_: + state->mode = LEN; + case LEN: + if (have >= 6 && left >= 258) { + RESTORE(); + inflate_fast(strm, out); + LOAD(); + if (state->mode == TYPE) + state->back = -1; + break; + } + state->back = 0; + for (;;) { + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if (here.op && (here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->lencode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + state->back += last.bits; + } + DROPBITS(here.bits); + state->back += here.bits; + state->length = (unsigned)here.val; + if ((int)(here.op) == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", here.val)); + state->mode = LIT; + break; + } + if (here.op & 32) { + Tracevv((stderr, "inflate: end of block\n")); + state->back = -1; + state->mode = TYPE; + break; + } + if (here.op & 64) { + strm->msg = (char *)"invalid literal/length code"; + state->mode = BAD; + break; + } + state->extra = (unsigned)(here.op) & 15; + state->mode = LENEXT; + case LENEXT: + if (state->extra) { + NEEDBITS(state->extra); + state->length += BITS(state->extra); + DROPBITS(state->extra); + state->back += state->extra; + } + Tracevv((stderr, "inflate: length %u\n", state->length)); + state->was = state->length; + state->mode = DIST; + case DIST: + for (;;) { + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; + PULLBYTE(); + } + if ((here.op & 0xf0) == 0) { + last = here; + for (;;) { + here = state->distcode[last.val + + (BITS(last.bits + last.op) >> last.bits)]; + if ((unsigned)(last.bits + here.bits) <= bits) break; + PULLBYTE(); + } + DROPBITS(last.bits); + state->back += last.bits; + } + DROPBITS(here.bits); + state->back += here.bits; + if (here.op & 64) { + strm->msg = (char *)"invalid distance code"; + state->mode = BAD; + break; + } + state->offset = (unsigned)here.val; + state->extra = (unsigned)(here.op) & 15; + state->mode = DISTEXT; + case DISTEXT: + if (state->extra) { + NEEDBITS(state->extra); + state->offset += BITS(state->extra); + DROPBITS(state->extra); + state->back += state->extra; + } +#ifdef INFLATE_STRICT + if (state->offset > state->dmax) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#endif + Tracevv((stderr, "inflate: distance %u\n", state->offset)); + state->mode = MATCH; + case MATCH: + if (left == 0) goto inf_leave; + copy = out - left; + if (state->offset > copy) { /* copy from window */ + copy = state->offset - copy; + if (copy > state->whave) { + if (state->sane) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + Trace((stderr, "inflate.c too far\n")); + copy -= state->whave; + if (copy > state->length) copy = state->length; + if (copy > left) copy = left; + left -= copy; + state->length -= copy; + do { + *put++ = 0; + } while (--copy); + if (state->length == 0) state->mode = LEN; + break; +#endif + } + if (copy > state->wnext) { + copy -= state->wnext; + from = state->window + (state->wsize - copy); + } + else + from = state->window + (state->wnext - copy); + if (copy > state->length) copy = state->length; + } + else { /* copy from output */ + from = put - state->offset; + copy = state->length; + } + if (copy > left) copy = left; + left -= copy; + state->length -= copy; + do { + *put++ = *from++; + } while (--copy); + if (state->length == 0) state->mode = LEN; + break; + case LIT: + if (left == 0) goto inf_leave; + *put++ = (unsigned char)(state->length); + left--; + state->mode = LEN; + break; + case CHECK: + if (state->wrap) { + NEEDBITS(32); + out -= left; + strm->total_out += out; + state->total += out; + if ((state->wrap & 4) && out) + strm->adler = state->check = + UPDATE(state->check, put - out, out); + out = left; + if ((state->wrap & 4) && ( +#ifdef GUNZIP + state->flags ? 
hold : +#endif + ZSWAP32(hold)) != state->check) { + strm->msg = (char *)"incorrect data check"; + state->mode = BAD; + break; + } + INITBITS(); + Tracev((stderr, "inflate: check matches trailer\n")); + } +#ifdef GUNZIP + state->mode = LENGTH; + case LENGTH: + if (state->wrap && state->flags) { + NEEDBITS(32); + if (hold != (state->total & 0xffffffffUL)) { + strm->msg = (char *)"incorrect length check"; + state->mode = BAD; + break; + } + INITBITS(); + Tracev((stderr, "inflate: length matches trailer\n")); + } +#endif + state->mode = DONE; + case DONE: + ret = Z_STREAM_END; + goto inf_leave; + case BAD: + ret = Z_DATA_ERROR; + goto inf_leave; + case MEM: + return Z_MEM_ERROR; + case SYNC: + default: + return Z_STREAM_ERROR; + } + + /* + Return from inflate(), updating the total counts and the check value. + If there was no progress during the inflate() call, return a buffer + error. Call updatewindow() to create and/or update the window state. + Note: a memory error from inflate() is non-recoverable. + */ + inf_leave: + RESTORE(); + if (state->wsize || (out != strm->avail_out && state->mode < BAD && + (state->mode < CHECK || flush != Z_FINISH))) + if (updatewindow(strm, strm->next_out, out - strm->avail_out)) { + state->mode = MEM; + return Z_MEM_ERROR; + } + in -= strm->avail_in; + out -= strm->avail_out; + strm->total_in += in; + strm->total_out += out; + state->total += out; + if ((state->wrap & 4) && out) + strm->adler = state->check = + UPDATE(state->check, strm->next_out - out, out); + strm->data_type = (int)state->bits + (state->last ? 64 : 0) + + (state->mode == TYPE ? 128 : 0) + + (state->mode == LEN_ || state->mode == COPY_ ? 256 : 0); + if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) + ret = Z_BUF_ERROR; + return ret; +} + +int ZEXPORT inflateEnd(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + if (inflateStateCheck(strm)) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (state->window != Z_NULL) ZFREE(strm, state->window); + ZFREE(strm, strm->state); + strm->state = Z_NULL; + Tracev((stderr, "inflate: end\n")); + return Z_OK; +} + +int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength) +z_streamp strm; +Bytef *dictionary; +uInt *dictLength; +{ + struct inflate_state FAR *state; + + /* check state */ + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* copy dictionary */ + if (state->whave && dictionary != Z_NULL) { + zmemcpy(dictionary, state->window + state->wnext, + state->whave - state->wnext); + zmemcpy(dictionary + state->whave - state->wnext, + state->window, state->wnext); + } + if (dictLength != Z_NULL) + *dictLength = state->whave; + return Z_OK; +} + +int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) +z_streamp strm; +const Bytef *dictionary; +uInt dictLength; +{ + struct inflate_state FAR *state; + unsigned long dictid; + int ret; + + /* check state */ + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (state->wrap != 0 && state->mode != DICT) + return Z_STREAM_ERROR; + + /* check for correct dictionary identifier */ + if (state->mode == DICT) { + dictid = adler32(0L, Z_NULL, 0); + dictid = adler32(dictid, dictionary, dictLength); + if (dictid != state->check) + return Z_DATA_ERROR; + } + + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary + dictLength, dictLength); + if 
(ret) { + state->mode = MEM; + return Z_MEM_ERROR; + } + state->havedict = 1; + Tracev((stderr, "inflate: dictionary set\n")); + return Z_OK; +} + +int ZEXPORT inflateGetHeader(strm, head) +z_streamp strm; +gz_headerp head; +{ + struct inflate_state FAR *state; + + /* check state */ + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if ((state->wrap & 2) == 0) return Z_STREAM_ERROR; + + /* save header structure */ + state->head = head; + head->done = 0; + return Z_OK; +} + +/* + Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found + or when out of input. When called, *have is the number of pattern bytes + found in order so far, in 0..3. On return *have is updated to the new + state. If on return *have equals four, then the pattern was found and the + return value is how many bytes were read including the last byte of the + pattern. If *have is less than four, then the pattern has not been found + yet and the return value is len. In the latter case, syncsearch() can be + called again with more data and the *have state. *have is initialized to + zero for the first call. + */ +local unsigned syncsearch(have, buf, len) +unsigned FAR *have; +const unsigned char FAR *buf; +unsigned len; +{ + unsigned got; + unsigned next; + + got = *have; + next = 0; + while (next < len && got < 4) { + if ((int)(buf[next]) == (got < 2 ? 0 : 0xff)) + got++; + else if (buf[next]) + got = 0; + else + got = 4 - got; + next++; + } + *have = got; + return next; +} + +int ZEXPORT inflateSync(strm) +z_streamp strm; +{ + unsigned len; /* number of bytes to look at or looked at */ + unsigned long in, out; /* temporary to save total_in and total_out */ + unsigned char buf[4]; /* to restore bit buffer to byte string */ + struct inflate_state FAR *state; + + /* check parameters */ + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; + + /* if first time, start search in bit buffer */ + if (state->mode != SYNC) { + state->mode = SYNC; + state->hold <<= state->bits & 7; + state->bits -= state->bits & 7; + len = 0; + while (state->bits >= 8) { + buf[len++] = (unsigned char)(state->hold); + state->hold >>= 8; + state->bits -= 8; + } + state->have = 0; + syncsearch(&(state->have), buf, len); + } + + /* search available input */ + len = syncsearch(&(state->have), strm->next_in, strm->avail_in); + strm->avail_in -= len; + strm->next_in += len; + strm->total_in += len; + + /* return no joy or set up to restart inflate() on a new block */ + if (state->have != 4) return Z_DATA_ERROR; + in = strm->total_in; out = strm->total_out; + inflateReset(strm); + strm->total_in = in; strm->total_out = out; + state->mode = TYPE; + return Z_OK; +} + +/* + Returns true if inflate is currently at the end of a block generated by + Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP + implementation to provide an additional safety check. PPP uses + Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored + block. When decompressing, PPP checks that at the end of input packet, + inflate is waiting for these length bytes. 
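   A caller performing that check might do so roughly as follows, where strm
   is an assumed, already-set-up decompression stream for the link and
   drop_packet() is a hypothetical error path, shown only as a sketch:

     ret = inflate(&strm, Z_SYNC_FLUSH);
     if (ret != Z_OK || strm.avail_in != 0 || !inflateSyncPoint(&strm))
         drop_packet();   // packet did not end at an empty stored block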
+ */ +int ZEXPORT inflateSyncPoint(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + return state->mode == STORED && state->bits == 0; +} + +int ZEXPORT inflateCopy(dest, source) +z_streamp dest; +z_streamp source; +{ + struct inflate_state FAR *state; + struct inflate_state FAR *copy; + unsigned char FAR *window; + unsigned wsize; + + /* check input */ + if (inflateStateCheck(source) || dest == Z_NULL) + return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)source->state; + + /* allocate space */ + copy = (struct inflate_state FAR *) + ZALLOC(source, 1, sizeof(struct inflate_state)); + if (copy == Z_NULL) return Z_MEM_ERROR; + window = Z_NULL; + if (state->window != Z_NULL) { + window = (unsigned char FAR *) + ZALLOC(source, 1U << state->wbits, sizeof(unsigned char)); + if (window == Z_NULL) { + ZFREE(source, copy); + return Z_MEM_ERROR; + } + } + + /* copy state */ + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); + zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state)); + copy->strm = dest; + if (state->lencode >= state->codes && + state->lencode <= state->codes + ENOUGH - 1) { + copy->lencode = copy->codes + (state->lencode - state->codes); + copy->distcode = copy->codes + (state->distcode - state->codes); + } + copy->next = copy->codes + (state->next - state->codes); + if (window != Z_NULL) { + wsize = 1U << state->wbits; + zmemcpy(window, state->window, wsize); + } + copy->window = window; + dest->state = (struct internal_state FAR *)copy; + return Z_OK; +} + +int ZEXPORT inflateUndermine(strm, subvert) +z_streamp strm; +int subvert; +{ + struct inflate_state FAR *state; + + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + state->sane = !subvert; + return Z_OK; +#else + (void)subvert; + state->sane = 1; + return Z_DATA_ERROR; +#endif +} + +int ZEXPORT inflateValidate(strm, check) +z_streamp strm; +int check; +{ + struct inflate_state FAR *state; + + if (inflateStateCheck(strm)) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + if (check) + state->wrap |= 4; + else + state->wrap &= ~4; + return Z_OK; +} + +long ZEXPORT inflateMark(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (inflateStateCheck(strm)) + return -(1L << 16); + state = (struct inflate_state FAR *)strm->state; + return (long)(((unsigned long)((long)state->back)) << 16) + + (state->mode == COPY ? state->length : + (state->mode == MATCH ? state->was - state->length : 0)); +} + +unsigned long ZEXPORT inflateCodesUsed(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + if (inflateStateCheck(strm)) return (unsigned long)-1; + state = (struct inflate_state FAR *)strm->state; + return (unsigned long)(state->next - state->codes); +} diff --git a/deps/zlib-1.2.11/src/inflate.h b/deps/zlib-1.2.11/src/inflate.h new file mode 100644 index 000000000000..a46cce6b6d05 --- /dev/null +++ b/deps/zlib-1.2.11/src/inflate.h @@ -0,0 +1,125 @@ +/* inflate.h -- internal inflate state definition + * Copyright (C) 1995-2016 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. 
+ */ + +/* define NO_GZIP when compiling if you want to disable gzip header and + trailer decoding by inflate(). NO_GZIP would be used to avoid linking in + the crc code when it is not needed. For shared libraries, gzip decoding + should be left enabled. */ +#ifndef NO_GZIP +# define GUNZIP +#endif + +/* Possible inflate modes between inflate() calls */ +typedef enum { + HEAD = 16180, /* i: waiting for magic header */ + FLAGS, /* i: waiting for method and flags (gzip) */ + TIME, /* i: waiting for modification time (gzip) */ + OS, /* i: waiting for extra flags and operating system (gzip) */ + EXLEN, /* i: waiting for extra length (gzip) */ + EXTRA, /* i: waiting for extra bytes (gzip) */ + NAME, /* i: waiting for end of file name (gzip) */ + COMMENT, /* i: waiting for end of comment (gzip) */ + HCRC, /* i: waiting for header crc (gzip) */ + DICTID, /* i: waiting for dictionary check value */ + DICT, /* waiting for inflateSetDictionary() call */ + TYPE, /* i: waiting for type bits, including last-flag bit */ + TYPEDO, /* i: same, but skip check to exit inflate on new block */ + STORED, /* i: waiting for stored size (length and complement) */ + COPY_, /* i/o: same as COPY below, but only first time in */ + COPY, /* i/o: waiting for input or output to copy stored block */ + TABLE, /* i: waiting for dynamic block table lengths */ + LENLENS, /* i: waiting for code length code lengths */ + CODELENS, /* i: waiting for length/lit and distance code lengths */ + LEN_, /* i: same as LEN below, but only first time in */ + LEN, /* i: waiting for length/lit/eob code */ + LENEXT, /* i: waiting for length extra bits */ + DIST, /* i: waiting for distance code */ + DISTEXT, /* i: waiting for distance extra bits */ + MATCH, /* o: waiting for output space to copy string */ + LIT, /* o: waiting for output space to write literal */ + CHECK, /* i: waiting for 32-bit check value */ + LENGTH, /* i: waiting for 32-bit length (gzip) */ + DONE, /* finished check, done -- remain here until reset */ + BAD, /* got a data error -- remain here until reset */ + MEM, /* got an inflate() memory error -- remain here until reset */ + SYNC /* looking for synchronization bytes to restart inflate() */ +} inflate_mode; + +/* + State transitions between above modes - + + (most modes can go to BAD or MEM on error -- not shown for clarity) + + Process header: + HEAD -> (gzip) or (zlib) or (raw) + (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT -> + HCRC -> TYPE + (zlib) -> DICTID or TYPE + DICTID -> DICT -> TYPE + (raw) -> TYPEDO + Read deflate blocks: + TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK + STORED -> COPY_ -> COPY -> TYPE + TABLE -> LENLENS -> CODELENS -> LEN_ + LEN_ -> LEN + Read deflate codes in fixed or dynamic block: + LEN -> LENEXT or LIT or TYPE + LENEXT -> DIST -> DISTEXT -> MATCH -> LEN + LIT -> LEN + Process trailer: + CHECK -> LENGTH -> DONE + */ + +/* State maintained between inflate() calls -- approximately 7K bytes, not + including the allocated sliding window, which is up to 32K bytes. 
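   As a rough worked figure: with the default wbits == 15 the window below is
   1U << 15 == 32768 bytes, so a stream that has allocated its window costs
   on the order of 7K + 32K of state; requesting a smaller windowBits through
   inflateInit2() (down to 8, i.e. a 256 byte window) shrinks only the window
   part, since codes[ENOUGH] and the other fields below are fixed in size.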
*/ +struct inflate_state { + z_streamp strm; /* pointer back to this zlib stream */ + inflate_mode mode; /* current inflate mode */ + int last; /* true if processing last block */ + int wrap; /* bit 0 true for zlib, bit 1 true for gzip, + bit 2 true to validate check value */ + int havedict; /* true if dictionary provided */ + int flags; /* gzip header method and flags (0 if zlib) */ + unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */ + unsigned long check; /* protected copy of check value */ + unsigned long total; /* protected copy of output count */ + gz_headerp head; /* where to save gzip header information */ + /* sliding window */ + unsigned wbits; /* log base 2 of requested window size */ + unsigned wsize; /* window size or zero if not using window */ + unsigned whave; /* valid bytes in the window */ + unsigned wnext; /* window write index */ + unsigned char FAR *window; /* allocated sliding window, if needed */ + /* bit accumulator */ + unsigned long hold; /* input bit accumulator */ + unsigned bits; /* number of bits in "in" */ + /* for string and stored block copying */ + unsigned length; /* literal or length of data to copy */ + unsigned offset; /* distance back to copy string from */ + /* for table and code decoding */ + unsigned extra; /* extra bits needed */ + /* fixed and dynamic code tables */ + code const FAR *lencode; /* starting table for length/literal codes */ + code const FAR *distcode; /* starting table for distance codes */ + unsigned lenbits; /* index bits for lencode */ + unsigned distbits; /* index bits for distcode */ + /* dynamic table building */ + unsigned ncode; /* number of code length code lengths */ + unsigned nlen; /* number of length code lengths */ + unsigned ndist; /* number of distance code lengths */ + unsigned have; /* number of code lengths in lens[] */ + code FAR *next; /* next available space in codes[] */ + unsigned short lens[320]; /* temporary storage for code lengths */ + unsigned short work[288]; /* work area for code table building */ + code codes[ENOUGH]; /* space for code tables */ + int sane; /* if false, allow invalid distance too far */ + int back; /* bits back of last unprocessed length/lit */ + unsigned was; /* initial length of match */ +}; diff --git a/deps/zlib-1.2.11/src/inftrees.c b/deps/zlib-1.2.11/src/inftrees.c new file mode 100644 index 000000000000..2ea08fc13ea8 --- /dev/null +++ b/deps/zlib-1.2.11/src/inftrees.c @@ -0,0 +1,304 @@ +/* inftrees.c -- generate Huffman trees for efficient decoding + * Copyright (C) 1995-2017 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "zutil.h" +#include "inftrees.h" + +#define MAXBITS 15 + +const char inflate_copyright[] = + " inflate 1.2.11 Copyright 1995-2017 Mark Adler "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. + */ + +/* + Build a set of tables to decode the provided canonical Huffman code. + The code lengths are lens[0..codes-1]. The result starts at *table, + whose indices are 0..2^bits-1. work is a writable array of at least + lens shorts, which is used as a work area. type is the type of code + to be generated, CODES, LENS, or DISTS. On return, zero is success, + -1 is an invalid code, and +1 means that ENOUGH isn't enough. table + on return points to the next available entry's address. 
bits is the + requested root table index bits, and on return it is the actual root + table index bits. It will differ if the request is greater than the + longest code or if it is less than the shortest code. + */ +int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) +codetype type; +unsigned short FAR *lens; +unsigned codes; +code FAR * FAR *table; +unsigned FAR *bits; +unsigned short FAR *work; +{ + unsigned len; /* a code's length in bits */ + unsigned sym; /* index of code symbols */ + unsigned min, max; /* minimum and maximum code lengths */ + unsigned root; /* number of index bits for root table */ + unsigned curr; /* number of index bits for current table */ + unsigned drop; /* code bits to drop for sub-table */ + int left; /* number of prefix codes available */ + unsigned used; /* code entries in table used */ + unsigned huff; /* Huffman code */ + unsigned incr; /* for incrementing code, index */ + unsigned fill; /* index for replicating entries */ + unsigned low; /* low bits for current root entry */ + unsigned mask; /* mask for low root bits */ + code here; /* table entry for duplication */ + code FAR *next; /* next available space in table */ + const unsigned short FAR *base; /* base value table to use */ + const unsigned short FAR *extra; /* extra bits table to use */ + unsigned match; /* use base and extra for symbol >= match */ + unsigned short count[MAXBITS+1]; /* number of codes of each length */ + unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ + static const unsigned short lbase[31] = { /* Length codes 257..285 base */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + static const unsigned short lext[31] = { /* Length codes 257..285 extra */ + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 77, 202}; + static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577, 0, 0}; + static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ + 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, + 28, 28, 29, 29, 64, 64}; + + /* + Process a set of code lengths to create a canonical Huffman code. The + code lengths are lens[0..codes-1]. Each length corresponds to the + symbols 0..codes-1. The Huffman code is generated by first sorting the + symbols by length from short to long, and retaining the symbol order + for codes with equal lengths. Then the code starts with all zero bits + for the first code of the shortest length, and the codes are integer + increments for the same length, and zeros are appended as the length + increases. For the deflate format, these bits are stored backwards + from their more natural integer increment ordering, and so when the + decoding tables are built in the large loop below, the integer codes + are incremented backwards. + + This routine assumes, but does not check, that all of the entries in + lens[] are in the range 0..MAXBITS. The caller must assure this. + 1..MAXBITS is interpreted as that code length. zero means that that + symbol does not occur in this code. 
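     To make the construction described above concrete, a small worked
     example (lengths chosen arbitrarily): for four symbols with
     lens[] == {2, 1, 3, 3}, sorting by length gives symbol 1, then symbol 0,
     then symbols 2 and 3, and the canonical codes come out as

       symbol 1:  0        (1 bit)
       symbol 0:  10       (2 bits)
       symbol 2:  110      (3 bits)
       symbol 3:  111      (3 bits)

     that is, each code is the previous code incremented as an integer, with
     a zero appended whenever the length grows.  Because the deflate format
     stores these bits backwards, as noted above, the table-building loop
     below steps the code huff through the same sequence in bit-reversed
     order.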
+ + The codes are sorted by computing a count of codes for each length, + creating from that a table of starting indices for each length in the + sorted table, and then entering the symbols in order in the sorted + table. The sorted table is work[], with that space being provided by + the caller. + + The length counts are used for other purposes as well, i.e. finding + the minimum and maximum length codes, determining if there are any + codes at all, checking for a valid set of lengths, and looking ahead + at length counts to determine sub-table sizes when building the + decoding tables. + */ + + /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ + for (len = 0; len <= MAXBITS; len++) + count[len] = 0; + for (sym = 0; sym < codes; sym++) + count[lens[sym]]++; + + /* bound code lengths, force root to be within code lengths */ + root = *bits; + for (max = MAXBITS; max >= 1; max--) + if (count[max] != 0) break; + if (root > max) root = max; + if (max == 0) { /* no symbols to code at all */ + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)1; + here.val = (unsigned short)0; + *(*table)++ = here; /* make a table to force an error */ + *(*table)++ = here; + *bits = 1; + return 0; /* no symbols, but wait for decoding to report error */ + } + for (min = 1; min < max; min++) + if (count[min] != 0) break; + if (root < min) root = min; + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; + left -= count[len]; + if (left < 0) return -1; /* over-subscribed */ + } + if (left > 0 && (type == CODES || max != 1)) + return -1; /* incomplete set */ + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) + offs[len + 1] = offs[len] + count[len]; + + /* sort symbols by length, by symbol order within each length */ + for (sym = 0; sym < codes; sym++) + if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; + + /* + Create and fill in decoding tables. In this loop, the table being + filled is at next and has curr index bits. The code being used is huff + with length len. That code is converted to an index by dropping drop + bits off of the bottom. For codes where len is less than drop + curr, + those top drop + curr - len bits are incremented through all values to + fill the table with replicated entries. + + root is the number of index bits for the root table. When len exceeds + root, sub-tables are created pointed to by the root entry with an index + of the low root bits of huff. This is saved in low to check for when a + new sub-table should be started. drop is zero when the root table is + being filled, and drop is root when sub-tables are being filled. + + When a new sub-table is needed, it is necessary to look ahead in the + code lengths to determine what size sub-table is needed. The length + counts are used for this, and so count[] is decremented as codes are + entered in the tables. + + used keeps track of how many table entries have been allocated from the + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. + + sym increments through all symbols, and the loop terminates when + all codes of length max, i.e. all codes, have been processed. 
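     For a concrete (illustrative) case: when building the literal/length
     table with root == 9, a 12-bit code cannot live in the root table, so
     the root entry selected by its low 9 bits gets op set to the sub-table's
     index width (at least 12 - 9 == 3 bits), bits set to 9, and val set to
     the offset of that sub-table.  inflate() then finishes the lookup with
     lencode[last.val + (BITS(last.bits + last.op) >> last.bits)], dropping
     the 9 root bits and indexing the sub-table with the bits above them.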
This + routine permits incomplete codes, so another loop after this one fills + in the rest of the decoding tables with invalid code markers. + */ + + /* set up for code type */ + switch (type) { + case CODES: + base = extra = work; /* dummy value--not used */ + match = 20; + break; + case LENS: + base = lbase; + extra = lext; + match = 257; + break; + default: /* DISTS */ + base = dbase; + extra = dext; + match = 0; + } + + /* initialize state for loop */ + huff = 0; /* starting code */ + sym = 0; /* starting code symbol */ + len = min; /* starting code length */ + next = *table; /* current table to fill in */ + curr = root; /* current table index bits */ + drop = 0; /* current bits to drop from code for index */ + low = (unsigned)(-1); /* trigger new sub-table when len > root */ + used = 1U << root; /* use root table entries */ + mask = used - 1; /* mask for comparing low */ + + /* check available table space */ + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) + return 1; + + /* process all codes and make table entries */ + for (;;) { + /* create table entry */ + here.bits = (unsigned char)(len - drop); + if (work[sym] + 1U < match) { + here.op = (unsigned char)0; + here.val = work[sym]; + } + else if (work[sym] >= match) { + here.op = (unsigned char)(extra[work[sym] - match]); + here.val = base[work[sym] - match]; + } + else { + here.op = (unsigned char)(32 + 64); /* end of block */ + here.val = 0; + } + + /* replicate for those indices with low len bits equal to huff */ + incr = 1U << (len - drop); + fill = 1U << curr; + min = fill; /* save offset to next table */ + do { + fill -= incr; + next[(huff >> drop) + fill] = here; + } while (fill != 0); + + /* backwards increment the len-bit code huff */ + incr = 1U << (len - 1); + while (huff & incr) + incr >>= 1; + if (incr != 0) { + huff &= incr - 1; + huff += incr; + } + else + huff = 0; + + /* go to next symbol, update count, len */ + sym++; + if (--(count[len]) == 0) { + if (len == max) break; + len = lens[work[sym]]; + } + + /* create new sub-table if needed */ + if (len > root && (huff & mask) != low) { + /* if first time, transition to sub-tables */ + if (drop == 0) + drop = root; + + /* increment past last table */ + next += min; /* here min is 1 << curr */ + + /* determine length of next table */ + curr = len - drop; + left = (int)(1 << curr); + while (curr + drop < max) { + left -= count[curr + drop]; + if (left <= 0) break; + curr++; + left <<= 1; + } + + /* check for enough space */ + used += 1U << curr; + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) + return 1; + + /* point entry in root table to sub-table */ + low = huff & mask; + (*table)[low].op = (unsigned char)curr; + (*table)[low].bits = (unsigned char)root; + (*table)[low].val = (unsigned short)(next - *table); + } + } + + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff != 0) { + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)(len - drop); + here.val = (unsigned short)0; + next[huff] = here; + } + + /* set return parameters */ + *table += used; + *bits = root; + return 0; +} diff --git a/deps/zlib-1.2.11/src/inftrees.h b/deps/zlib-1.2.11/src/inftrees.h new file mode 100644 index 000000000000..baa53a0b1a19 --- /dev/null +++ b/deps/zlib-1.2.11/src/inftrees.h @@ -0,0 +1,62 @@ +/* 
inftrees.h -- header to use inftrees.c + * Copyright (C) 1995-2005, 2010 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* Structure for decoding tables. Each entry provides either the + information needed to do the operation requested by the code that + indexed that table entry, or it provides a pointer to another + table that indexes more bits of the code. op indicates whether + the entry is a pointer to another table, a literal, a length or + distance, an end-of-block, or an invalid code. For a table + pointer, the low four bits of op is the number of index bits of + that table. For a length or distance, the low four bits of op + is the number of extra bits to get after the code. bits is + the number of bits in this code or part of the code to drop off + of the bit buffer. val is the actual byte to output in the case + of a literal, the base length or distance, or the offset from + the current table to the next table. Each entry is four bytes. */ +typedef struct { + unsigned char op; /* operation, extra bits, table bits */ + unsigned char bits; /* bits in this part of the code */ + unsigned short val; /* offset in table or code value */ +} code; + +/* op values as set by inflate_table(): + 00000000 - literal + 0000tttt - table link, tttt != 0 is the number of table index bits + 0001eeee - length or distance, eeee is the number of extra bits + 01100000 - end of block + 01000000 - invalid code + */ + +/* Maximum size of the dynamic table. The maximum number of code structures is + 1444, which is the sum of 852 for literal/length codes and 592 for distance + codes. These values were found by exhaustive searches using the program + examples/enough.c found in the zlib distribtution. The arguments to that + program are the number of symbols, the initial root table size, and the + maximum bit length of a code. "enough 286 9 15" for literal/length codes + returns returns 852, and "enough 30 6 15" for distance codes returns 592. + The initial root table size (9 or 6) is found in the fifth argument of the + inflate_table() calls in inflate.c and infback.c. If the root table size is + changed, then these maximum sizes would be need to be recalculated and + updated. */ +#define ENOUGH_LENS 852 +#define ENOUGH_DISTS 592 +#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS) + +/* Type of code to build for inflate_table() */ +typedef enum { + CODES, + LENS, + DISTS +} codetype; + +int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens, + unsigned codes, code FAR * FAR *table, + unsigned FAR *bits, unsigned short FAR *work)); diff --git a/deps/zlib-1.2.11/src/trees.c b/deps/zlib-1.2.11/src/trees.c new file mode 100644 index 000000000000..50cf4b4571cf --- /dev/null +++ b/deps/zlib-1.2.11/src/trees.c @@ -0,0 +1,1203 @@ +/* trees.c -- output deflated data using Huffman coding + * Copyright (C) 1995-2017 Jean-loup Gailly + * detect_data_type() function provided freely by Cosmin Truta, 2006 + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * ALGORITHM + * + * The "deflation" process uses several Huffman trees. The more + * common source values are represented by shorter bit sequences. 
+ * + * Each code tree is stored in a compressed form which is itself + * a Huffman encoding of the lengths of all the code strings (in + * ascending order by source values). The actual code strings are + * reconstructed from the lengths in the inflate process, as described + * in the deflate specification. + * + * REFERENCES + * + * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". + * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc + * + * Storer, James A. + * Data Compression: Methods and Theory, pp. 49-50. + * Computer Science Press, 1988. ISBN 0-7167-8156-5. + * + * Sedgewick, R. + * Algorithms, p290. + * Addison-Wesley, 1983. ISBN 0-201-06672-6. + */ + +/* @(#) $Id$ */ + +/* #define GEN_TREES_H */ + +#include "deflate.h" + +#ifdef ZLIB_DEBUG +# include +#endif + +/* =========================================================================== + * Constants + */ + +#define MAX_BL_BITS 7 +/* Bit length codes must not exceed MAX_BL_BITS bits */ + +#define END_BLOCK 256 +/* end of block literal code */ + +#define REP_3_6 16 +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ + +#define REPZ_3_10 17 +/* repeat a zero length 3-10 times (3 bits of repeat count) */ + +#define REPZ_11_138 18 +/* repeat a zero length 11-138 times (7 bits of repeat count) */ + +local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ + = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; + +local const int extra_dbits[D_CODES] /* extra bits for each distance code */ + = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ + = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; + +local const uch bl_order[BL_CODES] + = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + +/* =========================================================================== + * Local data. These are initialized only once. + */ + +#define DIST_CODE_LEN 512 /* see definition of array dist_code below */ + +#if defined(GEN_TREES_H) || !defined(STDC) +/* non ANSI compilers may not accept trees.h */ + +local ct_data static_ltree[L_CODES+2]; +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see _tr_init + * below). + */ + +local ct_data static_dtree[D_CODES]; +/* The static distance tree. (Actually a trivial tree since all codes use + * 5 bits.) + */ + +uch _dist_code[DIST_CODE_LEN]; +/* Distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. 
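 * For reference, the companion lookup in deflate.h (the d_code() macro, not
 * shown in this file) uses the array exactly this way: with dist being the
 * match distance minus one, the code is _dist_code[dist] when dist < 256,
 * and _dist_code[256 + (dist >> 7)] otherwise, i.e. the upper half of the
 * table is indexed by the top 8 bits of the 15-bit distance.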
+ */ + +uch _length_code[MAX_MATCH-MIN_MATCH+1]; +/* length code for each normalized match length (0 == MIN_MATCH) */ + +local int base_length[LENGTH_CODES]; +/* First normalized length for each code (0 = MIN_MATCH) */ + +local int base_dist[D_CODES]; +/* First normalized distance for each code (0 = distance of 1) */ + +#else +# include "trees.h" +#endif /* GEN_TREES_H */ + +struct static_tree_desc_s { + const ct_data *static_tree; /* static tree or NULL */ + const intf *extra_bits; /* extra bits for each code or NULL */ + int extra_base; /* base index for extra_bits */ + int elems; /* max number of elements in the tree */ + int max_length; /* max bit length for the codes */ +}; + +local const static_tree_desc static_l_desc = +{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; + +local const static_tree_desc static_d_desc = +{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; + +local const static_tree_desc static_bl_desc = +{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; + +/* =========================================================================== + * Local (static) routines in this file. + */ + +local void tr_static_init OF((void)); +local void init_block OF((deflate_state *s)); +local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); +local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); +local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); +local void build_tree OF((deflate_state *s, tree_desc *desc)); +local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); +local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); +local int build_bl_tree OF((deflate_state *s)); +local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, + int blcodes)); +local void compress_block OF((deflate_state *s, const ct_data *ltree, + const ct_data *dtree)); +local int detect_data_type OF((deflate_state *s)); +local unsigned bi_reverse OF((unsigned value, int length)); +local void bi_windup OF((deflate_state *s)); +local void bi_flush OF((deflate_state *s)); + +#ifdef GEN_TREES_H +local void gen_trees_header OF((void)); +#endif + +#ifndef ZLIB_DEBUG +# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) + /* Send a code of the given tree. c and tree must not have side effects */ + +#else /* !ZLIB_DEBUG */ +# define send_code(s, c, tree) \ + { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ + send_bits(s, tree[c].Code, tree[c].Len); } +#endif + +/* =========================================================================== + * Output a short LSB first on the stream. + * IN assertion: there is enough room in pendingBuf. + */ +#define put_short(s, w) { \ + put_byte(s, (uch)((w) & 0xff)); \ + put_byte(s, (uch)((ush)(w) >> 8)); \ +} + +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. + */ +#ifdef ZLIB_DEBUG +local void send_bits OF((deflate_state *s, int value, int length)); + +local void send_bits(s, value, length) + deflate_state *s; + int value; /* value to send */ + int length; /* number of bits */ +{ + Tracevv((stderr," l %2d v %4x ", length, value)); + Assert(length > 0 && length <= 15, "invalid length"); + s->bits_sent += (ulg)length; + + /* If not enough room in bi_buf, use (valid) bits from bi_buf and + * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) + * unused bits in value. 
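 * A worked instance (numbers picked only for illustration): with
 * Buf_size == 16, bi_valid == 13 and a 5-bit value to send, the low
 * 16 - 13 == 3 bits of value are OR'ed into bi_buf, the now-full 16-bit
 * bi_buf is flushed with put_short(), and the remaining 5 - 3 == 2 high
 * bits of value become the new bi_buf, leaving bi_valid == 13 + 5 - 16 == 2.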
+ */ + if (s->bi_valid > (int)Buf_size - length) { + s->bi_buf |= (ush)value << s->bi_valid; + put_short(s, s->bi_buf); + s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); + s->bi_valid += length - Buf_size; + } else { + s->bi_buf |= (ush)value << s->bi_valid; + s->bi_valid += length; + } +} +#else /* !ZLIB_DEBUG */ + +#define send_bits(s, value, length) \ +{ int len = length;\ + if (s->bi_valid > (int)Buf_size - len) {\ + int val = (int)value;\ + s->bi_buf |= (ush)val << s->bi_valid;\ + put_short(s, s->bi_buf);\ + s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ + s->bi_valid += len - Buf_size;\ + } else {\ + s->bi_buf |= (ush)(value) << s->bi_valid;\ + s->bi_valid += len;\ + }\ +} +#endif /* ZLIB_DEBUG */ + + +/* the arguments must not have side effects */ + +/* =========================================================================== + * Initialize the various 'constant' tables. + */ +local void tr_static_init() +{ +#if defined(GEN_TREES_H) || !defined(STDC) + static int static_init_done = 0; + int n; /* iterates over tree elements */ + int bits; /* bit counter */ + int length; /* length value */ + int code; /* code value */ + int dist; /* distance index */ + ush bl_count[MAX_BITS+1]; + /* number of codes at each bit length for an optimal tree */ + + if (static_init_done) return; + + /* For some embedded targets, global variables are not initialized: */ +#ifdef NO_INIT_GLOBAL_POINTERS + static_l_desc.static_tree = static_ltree; + static_l_desc.extra_bits = extra_lbits; + static_d_desc.static_tree = static_dtree; + static_d_desc.extra_bits = extra_dbits; + static_bl_desc.extra_bits = extra_blbits; +#endif + + /* Initialize the mapping length (0..255) -> length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES-1; code++) { + base_length[code] = length; + for (n = 0; n < (1< dist code (0..29) */ + dist = 0; + for (code = 0 ; code < 16; code++) { + base_dist[code] = dist; + for (n = 0; n < (1<>= 7; /* from now on, all distances are divided by 128 */ + for ( ; code < D_CODES; code++) { + base_dist[code] = dist << 7; + for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { + _dist_code[256 + dist++] = (uch)code; + } + } + Assert (dist == 256, "tr_static_init: 256+dist != 512"); + + /* Construct the codes of the static literal tree */ + for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; + n = 0; + while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; + while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; + while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; + while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); + + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES; n++) { + static_dtree[n].Len = 5; + static_dtree[n].Code = bi_reverse((unsigned)n, 5); + } + static_init_done = 1; + +# ifdef GEN_TREES_H + gen_trees_header(); +# endif +#endif /* defined(GEN_TREES_H) || !defined(STDC) */ +} + +/* =========================================================================== + * Genererate the file trees.h describing the static trees. + */ +#ifdef GEN_TREES_H +# ifndef ZLIB_DEBUG +# include +# endif + +# define SEPARATOR(i, last, width) \ + ((i) == (last)? "\n};\n\n" : \ + ((i) % (width) == (width)-1 ? 
",\n" : ", ")) + +void gen_trees_header() +{ + FILE *header = fopen("trees.h", "w"); + int i; + + Assert (header != NULL, "Can't open trees.h"); + fprintf(header, + "/* header created automatically with -DGEN_TREES_H */\n\n"); + + fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); + for (i = 0; i < L_CODES+2; i++) { + fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, + static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); + } + + fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); + for (i = 0; i < D_CODES; i++) { + fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, + static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); + } + + fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n"); + for (i = 0; i < DIST_CODE_LEN; i++) { + fprintf(header, "%2u%s", _dist_code[i], + SEPARATOR(i, DIST_CODE_LEN-1, 20)); + } + + fprintf(header, + "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); + for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { + fprintf(header, "%2u%s", _length_code[i], + SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); + } + + fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); + for (i = 0; i < LENGTH_CODES; i++) { + fprintf(header, "%1u%s", base_length[i], + SEPARATOR(i, LENGTH_CODES-1, 20)); + } + + fprintf(header, "local const int base_dist[D_CODES] = {\n"); + for (i = 0; i < D_CODES; i++) { + fprintf(header, "%5u%s", base_dist[i], + SEPARATOR(i, D_CODES-1, 10)); + } + + fclose(header); +} +#endif /* GEN_TREES_H */ + +/* =========================================================================== + * Initialize the tree data structures for a new zlib stream. + */ +void ZLIB_INTERNAL _tr_init(s) + deflate_state *s; +{ + tr_static_init(); + + s->l_desc.dyn_tree = s->dyn_ltree; + s->l_desc.stat_desc = &static_l_desc; + + s->d_desc.dyn_tree = s->dyn_dtree; + s->d_desc.stat_desc = &static_d_desc; + + s->bl_desc.dyn_tree = s->bl_tree; + s->bl_desc.stat_desc = &static_bl_desc; + + s->bi_buf = 0; + s->bi_valid = 0; +#ifdef ZLIB_DEBUG + s->compressed_len = 0L; + s->bits_sent = 0L; +#endif + + /* Initialize the first block of the first file: */ + init_block(s); +} + +/* =========================================================================== + * Initialize a new block. + */ +local void init_block(s) + deflate_state *s; +{ + int n; /* iterates over tree elements */ + + /* Initialize the trees. */ + for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; + for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; + for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; + + s->dyn_ltree[END_BLOCK].Freq = 1; + s->opt_len = s->static_len = 0L; + s->last_lit = s->matches = 0; +} + +#define SMALLEST 1 +/* Index within the heap array of least frequent node in the Huffman tree */ + + +/* =========================================================================== + * Remove the smallest element from the heap and recreate the heap with + * one less element. Updates heap and heap_len. + */ +#define pqremove(s, tree, top) \ +{\ + top = s->heap[SMALLEST]; \ + s->heap[SMALLEST] = s->heap[s->heap_len--]; \ + pqdownheap(s, tree, SMALLEST); \ +} + +/* =========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. 
+ */ +#define smaller(tree, n, m, depth) \ + (tree[n].Freq < tree[m].Freq || \ + (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) + +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). + */ +local void pqdownheap(s, tree, k) + deflate_state *s; + ct_data *tree; /* the tree to restore */ + int k; /* node to move down */ +{ + int v = s->heap[k]; + int j = k << 1; /* left son of k */ + while (j <= s->heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < s->heap_len && + smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { + j++; + } + /* Exit if v is smaller than both sons */ + if (smaller(tree, v, s->heap[j], s->depth)) break; + + /* Exchange v with the smallest son */ + s->heap[k] = s->heap[j]; k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; + } + s->heap[k] = v; +} + +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. + */ +local void gen_bitlen(s, desc) + deflate_state *s; + tree_desc *desc; /* the tree descriptor */ +{ + ct_data *tree = desc->dyn_tree; + int max_code = desc->max_code; + const ct_data *stree = desc->stat_desc->static_tree; + const intf *extra = desc->stat_desc->extra_bits; + int base = desc->stat_desc->extra_base; + int max_length = desc->stat_desc->max_length; + int h; /* heap index */ + int n, m; /* iterate over the tree elements */ + int bits; /* bit length */ + int xbits; /* extra bits */ + ush f; /* frequency */ + int overflow = 0; /* number of elements with bit length too large */ + + for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). 
+ */
+    tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+    for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+        n = s->heap[h];
+        bits = tree[tree[n].Dad].Len + 1;
+        if (bits > max_length) bits = max_length, overflow++;
+        tree[n].Len = (ush)bits;
+        /* We overwrite tree[n].Dad which is no longer needed */
+
+        if (n > max_code) continue; /* not a leaf node */
+
+        s->bl_count[bits]++;
+        xbits = 0;
+        if (n >= base) xbits = extra[n-base];
+        f = tree[n].Freq;
+        s->opt_len += (ulg)f * (unsigned)(bits + xbits);
+        if (stree) s->static_len += (ulg)f * (unsigned)(stree[n].Len + xbits);
+    }
+    if (overflow == 0) return;
+
+    Tracev((stderr,"\nbit length overflow\n"));
+    /* This happens for example on obj2 and pic of the Calgary corpus */
+
+    /* Find the first bit length which could increase: */
+    do {
+        bits = max_length-1;
+        while (s->bl_count[bits] == 0) bits--;
+        s->bl_count[bits]--;      /* move one leaf down the tree */
+        s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+        s->bl_count[max_length]--;
+        /* The brother of the overflow item also moves one step up,
+         * but this does not affect bl_count[max_length]
+         */
+        overflow -= 2;
+    } while (overflow > 0);
+
+    /* Now recompute all bit lengths, scanning in increasing frequency.
+     * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+     * lengths instead of fixing only the wrong ones. This idea is taken
+     * from 'ar' written by Haruhiko Okumura.)
+     */
+    for (bits = max_length; bits != 0; bits--) {
+        n = s->bl_count[bits];
+        while (n != 0) {
+            m = s->heap[--h];
+            if (m > max_code) continue;
+            if ((unsigned) tree[m].Len != (unsigned) bits) {
+                Tracev((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+                s->opt_len += ((ulg)bits - tree[m].Len) * tree[m].Freq;
+                tree[m].Len = (ush)bits;
+            }
+            n--;
+        }
+    }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ *     zero code length.
+ */
+local void gen_codes (tree, max_code, bl_count)
+    ct_data *tree;             /* the tree to decorate */
+    int max_code;              /* largest code with non zero frequency */
+    ushf *bl_count;            /* number of codes at each bit length */
+{
+    ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+    unsigned code = 0;         /* running code value */
+    int bits;                  /* bit index */
+    int n;                     /* code index */
+
+    /* The distribution counts are first used to generate the code values
+     * without bit reversal.
+     */
+    for (bits = 1; bits <= MAX_BITS; bits++) {
+        code = (code + bl_count[bits-1]) << 1;
+        next_code[bits] = (ush)code;
+    }
+    /* Check that the bit counts in bl_count are consistent. The last code
+     * must be all ones.
+     */
+    Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+            "inconsistent bit counts");
+    Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+    for (n = 0;  n <= max_code; n++) {
+        int len = tree[n].Len;
+        if (len == 0) continue;
+        /* Now reverse the bits */
+        tree[n].Code = (ush)bi_reverse(next_code[len]++, len);
+
+        Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+             n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+    }
+}
+
+/* ===========================================================================
+ * Construct one Huffman tree and assigns the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ *     and corresponding code. The length opt_len is updated; static_len is
+ *     also updated if stree is not null. The field max_code is set.
+ */
+local void build_tree(s, desc)
+    deflate_state *s;
+    tree_desc *desc; /* the tree descriptor */
+{
+    ct_data *tree         = desc->dyn_tree;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ int elems = desc->stat_desc->elems;
+ int n, m; /* iterate over heap elements */
+ int max_code = -1; /* largest code with non zero frequency */
+ int node; /* new node being created */
+
+ /* Construct the initial heap, with least frequent element in
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[0] is not used.
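The canonical code assignment performed by gen_codes() above can be reproduced in a small self-contained sketch (toy alphabet and lengths are hypothetical, and MAX_BITS is shrunk to 3 for brevity):

    #include <stdio.h>

    int main(void) {
        int len[4] = {2, 1, 3, 3};      /* code length per symbol              */
        int bl_count[4] = {0};          /* number of codes of each length      */
        int next_code[4], code = 0, n, bits;

        for (n = 0; n < 4; n++) bl_count[len[n]]++;
        for (bits = 1; bits <= 3; bits++) {
            code = (code + bl_count[bits - 1]) << 1;
            next_code[bits] = code;
        }
        /* symbols get codes 2, 0, 6, 7 (binary 10, 0, 110, 111) */
        for (n = 0; n < 4; n++)
            printf("symbol %d: %d bits, code %d\n", n, len[n], next_code[len[n]]++);
        return 0;
    }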
+ */ + s->heap_len = 0, s->heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) { + if (tree[n].Freq != 0) { + s->heap[++(s->heap_len)] = max_code = n; + s->depth[n] = 0; + } else { + tree[n].Len = 0; + } + } + + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (s->heap_len < 2) { + node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); + tree[node].Freq = 1; + s->depth[node] = 0; + s->opt_len--; if (stree) s->static_len -= stree[node].Len; + /* node is 0 or 1 so it does not have extra bits */ + } + desc->max_code = max_code; + + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + node = elems; /* next internal node of the tree */ + do { + pqremove(s, tree, n); /* n = node of least frequency */ + m = s->heap[SMALLEST]; /* m = node of next least frequency */ + + s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ + s->heap[--(s->heap_max)] = m; + + /* Create a new node father of n and m */ + tree[node].Freq = tree[n].Freq + tree[m].Freq; + s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ? + s->depth[n] : s->depth[m]) + 1); + tree[n].Dad = tree[m].Dad = (ush)node; +#ifdef DUMP_BL_TREE + if (tree == s->bl_tree) { + fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", + node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); + } +#endif + /* and insert the new node in the heap */ + s->heap[SMALLEST] = node++; + pqdownheap(s, tree, SMALLEST); + + } while (s->heap_len >= 2); + + s->heap[--(s->heap_max)] = s->heap[SMALLEST]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. + */ + gen_bitlen(s, (tree_desc *)desc); + + /* The field len is now set, we can generate the bit codes */ + gen_codes ((ct_data *)tree, max_code, s->bl_count); +} + +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. 
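As a concrete (hypothetical) illustration of the combining loop in build_tree() above: leaf frequencies {5, 2, 1, 1} first merge the two weight-1 leaves into a weight-2 node, then the two weight-2 subtrees into weight 4, and finally 4 with 5 into the root, giving leaf depths {1, 2, 3, 3}; gen_bitlen() then records them as bl_count[1] = 1, bl_count[2] = 1, bl_count[3] = 2.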
+ */ +local void scan_tree (s, tree, max_code) + deflate_state *s; + ct_data *tree; /* the tree to be scanned */ + int max_code; /* and its largest code of non zero frequency */ +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + + if (nextlen == 0) max_count = 138, min_count = 3; + tree[max_code+1].Len = (ush)0xffff; /* guard */ + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; nextlen = tree[n+1].Len; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + s->bl_tree[curlen].Freq += count; + } else if (curlen != 0) { + if (curlen != prevlen) s->bl_tree[curlen].Freq++; + s->bl_tree[REP_3_6].Freq++; + } else if (count <= 10) { + s->bl_tree[REPZ_3_10].Freq++; + } else { + s->bl_tree[REPZ_11_138].Freq++; + } + count = 0; prevlen = curlen; + if (nextlen == 0) { + max_count = 138, min_count = 3; + } else if (curlen == nextlen) { + max_count = 6, min_count = 3; + } else { + max_count = 7, min_count = 4; + } + } +} + +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. + */ +local void send_tree (s, tree, max_code) + deflate_state *s; + ct_data *tree; /* the tree to be scanned */ + int max_code; /* and its largest code of non zero frequency */ +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + + /* tree[max_code+1].Len = -1; */ /* guard already set */ + if (nextlen == 0) max_count = 138, min_count = 3; + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; nextlen = tree[n+1].Len; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + do { send_code(s, curlen, s->bl_tree); } while (--count != 0); + + } else if (curlen != 0) { + if (curlen != prevlen) { + send_code(s, curlen, s->bl_tree); count--; + } + Assert(count >= 3 && count <= 6, " 3_6?"); + send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); + + } else if (count <= 10) { + send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); + + } else { + send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); + } + count = 0; prevlen = curlen; + if (nextlen == 0) { + max_count = 138, min_count = 3; + } else if (curlen == nextlen) { + max_count = 6, min_count = 3; + } else { + max_count = 7, min_count = 4; + } + } +} + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to send. 
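A hypothetical run of code lengths makes the run-length scheme used by scan_tree() and send_tree() above concrete: the sequence {5, 5, 5, 5, 5, 0, 0, ..., 0} (five 5s followed by thirteen 0s) is emitted as the length 5 itself, then REP_3_6 with two extra bits of value 1 (repeat the previous length four more times), then REPZ_11_138 with seven extra bits of value 2 (a run of 13 zeros); scan_tree() bumps exactly one frequency count for each of those three bit length codes.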
+ */ +local int build_bl_tree(s) + deflate_state *s; +{ + int max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); + scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(s, (tree_desc *)(&(s->bl_desc))); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { + if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; + } + /* Update opt_len to include the bit length tree and counts */ + s->opt_len += 3*((ulg)max_blindex+1) + 5+5+4; + Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + s->opt_len, s->static_len)); + + return max_blindex; +} + +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. + */ +local void send_all_trees(s, lcodes, dcodes, blcodes) + deflate_state *s; + int lcodes, dcodes, blcodes; /* number of codes for each tree */ +{ + int rank; /* index in bl_order */ + + Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + "too many codes"); + Tracev((stderr, "\nbl counts: ")); + send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ + send_bits(s, dcodes-1, 5); + send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); + } + Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); + + send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ + Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); + + send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ + Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); +} + +/* =========================================================================== + * Send a stored block + */ +void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last) + deflate_state *s; + charf *buf; /* input block */ + ulg stored_len; /* length of input block */ + int last; /* one if this is the last block for a file */ +{ + send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */ + bi_windup(s); /* align on byte boundary */ + put_short(s, (ush)stored_len); + put_short(s, (ush)~stored_len); + zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len); + s->pending += stored_len; +#ifdef ZLIB_DEBUG + s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; + s->compressed_len += (stored_len + 4) << 3; + s->bits_sent += 2*16; + s->bits_sent += stored_len<<3; +#endif +} + +/* =========================================================================== + * Flush the bits in the bit buffer to pending output (leaves at most 7 bits) + */ +void ZLIB_INTERNAL _tr_flush_bits(s) + deflate_state *s; +{ + bi_flush(s); +} + +/* 
=========================================================================== + * Send one empty static block to give enough lookahead for inflate. + * This takes 10 bits, of which 7 may remain in the bit buffer. + */ +void ZLIB_INTERNAL _tr_align(s) + deflate_state *s; +{ + send_bits(s, STATIC_TREES<<1, 3); + send_code(s, END_BLOCK, static_ltree); +#ifdef ZLIB_DEBUG + s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ +#endif + bi_flush(s); +} + +/* =========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and write out the encoded block. + */ +void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) + deflate_state *s; + charf *buf; /* input block, or NULL if too old */ + ulg stored_len; /* length of input block */ + int last; /* one if this is the last block for a file */ +{ + ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ + int max_blindex = 0; /* index of last bit length code of non zero freq */ + + /* Build the Huffman trees unless a stored block is forced */ + if (s->level > 0) { + + /* Check if the file is binary or text */ + if (s->strm->data_type == Z_UNKNOWN) + s->strm->data_type = detect_data_type(s); + + /* Construct the literal and distance trees */ + build_tree(s, (tree_desc *)(&(s->l_desc))); + Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, + s->static_len)); + + build_tree(s, (tree_desc *)(&(s->d_desc))); + Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, + s->static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. + */ + + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = build_bl_tree(s); + + /* Determine the best encoding. Compute the block lengths in bytes. */ + opt_lenb = (s->opt_len+3+7)>>3; + static_lenb = (s->static_len+3+7)>>3; + + Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, + s->last_lit)); + + if (static_lenb <= opt_lenb) opt_lenb = static_lenb; + + } else { + Assert(buf != (char*)0, "lost buf"); + opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ + } + +#ifdef FORCE_STORED + if (buf != (char*)0) { /* force stored block */ +#else + if (stored_len+4 <= opt_lenb && buf != (char*)0) { + /* 4: two words for the lengths */ +#endif + /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + * Otherwise we can't have processed more than WSIZE input bytes since + * the last block flush, because compression would have been + * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + * transform a block into a stored block. 
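Hypothetical numbers (a sketch, not taken from a real trace) make the comparison above concrete:

    ulg opt_len = 1234, static_len = 1300, stored_len = 150;  /* bits, bits, bytes */
    ulg opt_lenb    = (opt_len + 3 + 7) >> 3;      /* 155 bytes incl. 3-bit header */
    ulg static_lenb = (static_len + 3 + 7) >> 3;   /* 163 bytes                    */
    if (static_lenb <= opt_lenb) opt_lenb = static_lenb;      /* stays 155         */
    /* stored_len + 4 == 154 <= opt_lenb, so this block would be stored verbatim   */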
+ */ + _tr_stored_block(s, buf, stored_len, last); + +#ifdef FORCE_STATIC + } else if (static_lenb >= 0) { /* force static trees */ +#else + } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { +#endif + send_bits(s, (STATIC_TREES<<1)+last, 3); + compress_block(s, (const ct_data *)static_ltree, + (const ct_data *)static_dtree); +#ifdef ZLIB_DEBUG + s->compressed_len += 3 + s->static_len; +#endif + } else { + send_bits(s, (DYN_TREES<<1)+last, 3); + send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, + max_blindex+1); + compress_block(s, (const ct_data *)s->dyn_ltree, + (const ct_data *)s->dyn_dtree); +#ifdef ZLIB_DEBUG + s->compressed_len += 3 + s->opt_len; +#endif + } + Assert (s->compressed_len == s->bits_sent, "bad compressed size"); + /* The above check is made mod 2^32, for files larger than 512 MB + * and uLong implemented on 32 bits. + */ + init_block(s); + + if (last) { + bi_windup(s); +#ifdef ZLIB_DEBUG + s->compressed_len += 7; /* align on byte boundary */ +#endif + } + Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, + s->compressed_len-7*last)); +} + +/* =========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. + */ +int ZLIB_INTERNAL _tr_tally (s, dist, lc) + deflate_state *s; + unsigned dist; /* distance of matched string */ + unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ +{ + s->d_buf[s->last_lit] = (ush)dist; + s->l_buf[s->last_lit++] = (uch)lc; + if (dist == 0) { + /* lc is the unmatched char */ + s->dyn_ltree[lc].Freq++; + } else { + s->matches++; + /* Here, lc is the match length - MIN_MATCH */ + dist--; /* dist = match distance - 1 */ + Assert((ush)dist < (ush)MAX_DIST(s) && + (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && + (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); + + s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; + s->dyn_dtree[d_code(dist)].Freq++; + } + +#ifdef TRUNCATE_BLOCK + /* Try to guess if it is profitable to stop the current block here */ + if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { + /* Compute an upper bound for the compressed length */ + ulg out_length = (ulg)s->last_lit*8L; + ulg in_length = (ulg)((long)s->strstart - s->block_start); + int dcode; + for (dcode = 0; dcode < D_CODES; dcode++) { + out_length += (ulg)s->dyn_dtree[dcode].Freq * + (5L+extra_dbits[dcode]); + } + out_length >>= 3; + Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", + s->last_lit, in_length, out_length, + 100L - out_length*100L/in_length)); + if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; + } +#endif + return (s->last_lit == s->lit_bufsize-1); + /* We avoid equality with lit_bufsize because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 64K-1 bytes. 
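Two hypothetical calls illustrate the tally above: _tr_tally(s, 0, 'A') records a literal and increments dyn_ltree[65].Freq, while _tr_tally(s, 1, 0) records a three-byte match at distance 1 (lc is stored as match length minus MIN_MATCH), incrementing dyn_ltree[257].Freq, the first length code just past the 256 literals and END_BLOCK, as well as dyn_dtree[0].Freq.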
+ */ +} + +/* =========================================================================== + * Send the block data compressed using the given Huffman trees + */ +local void compress_block(s, ltree, dtree) + deflate_state *s; + const ct_data *ltree; /* literal tree */ + const ct_data *dtree; /* distance tree */ +{ + unsigned dist; /* distance of matched string */ + int lc; /* match length or unmatched char (if dist == 0) */ + unsigned lx = 0; /* running index in l_buf */ + unsigned code; /* the code to send */ + int extra; /* number of extra bits to send */ + + if (s->last_lit != 0) do { + dist = s->d_buf[lx]; + lc = s->l_buf[lx++]; + if (dist == 0) { + send_code(s, lc, ltree); /* send a literal byte */ + Tracecv(isgraph(lc), (stderr," '%c' ", lc)); + } else { + /* Here, lc is the match length - MIN_MATCH */ + code = _length_code[lc]; + send_code(s, code+LITERALS+1, ltree); /* send the length code */ + extra = extra_lbits[code]; + if (extra != 0) { + lc -= base_length[code]; + send_bits(s, lc, extra); /* send the extra length bits */ + } + dist--; /* dist is now the match distance - 1 */ + code = d_code(dist); + Assert (code < D_CODES, "bad d_code"); + + send_code(s, code, dtree); /* send the distance code */ + extra = extra_dbits[code]; + if (extra != 0) { + dist -= (unsigned)base_dist[code]; + send_bits(s, dist, extra); /* send the extra distance bits */ + } + } /* literal or match pair ? */ + + /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ + Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, + "pendingBuf overflow"); + + } while (lx < s->last_lit); + + send_code(s, END_BLOCK, ltree); +} + +/* =========================================================================== + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "black list" (0..6, 14..25, 28..31). + * b) There is at least one printable character belonging to the + * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). + * IN assertion: the fields Freq of dyn_ltree are set. + */ +local int detect_data_type(s) + deflate_state *s; +{ + /* black_mask is the bit mask of black-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + unsigned long black_mask = 0xf3ffc07fUL; + int n; + + /* Check for non-textual ("black-listed") bytes. */ + for (n = 0; n <= 31; n++, black_mask >>= 1) + if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0)) + return Z_BINARY; + + /* Check for textual ("white-listed") bytes. */ + if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0 + || s->dyn_ltree[13].Freq != 0) + return Z_TEXT; + for (n = 32; n < LITERALS; n++) + if (s->dyn_ltree[n].Freq != 0) + return Z_TEXT; + + /* There are no "black-listed" or "white-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. 
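The bit mask used above can be checked with a small standalone program (illustration only, not part of the patch):

    #include <stdio.h>

    int main(void) {
        unsigned long black_mask = 0xf3ffc07fUL;   /* same constant as above */
        int n;

        for (n = 0; n <= 31; n++, black_mask >>= 1)
            if (black_mask & 1)
                printf("%d ", n);      /* prints 0..6, 14..25 and 28..31 */
        putchar('\n');
        return 0;
    }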
+ */ + return Z_BINARY; +} + +/* =========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ +local unsigned bi_reverse(code, len) + unsigned code; /* the value to invert */ + int len; /* its bit length */ +{ + register unsigned res = 0; + do { + res |= code & 1; + code >>= 1, res <<= 1; + } while (--len > 0); + return res >> 1; +} + +/* =========================================================================== + * Flush the bit buffer, keeping at most 7 bits in it. + */ +local void bi_flush(s) + deflate_state *s; +{ + if (s->bi_valid == 16) { + put_short(s, s->bi_buf); + s->bi_buf = 0; + s->bi_valid = 0; + } else if (s->bi_valid >= 8) { + put_byte(s, (Byte)s->bi_buf); + s->bi_buf >>= 8; + s->bi_valid -= 8; + } +} + +/* =========================================================================== + * Flush the bit buffer and align the output on a byte boundary + */ +local void bi_windup(s) + deflate_state *s; +{ + if (s->bi_valid > 8) { + put_short(s, s->bi_buf); + } else if (s->bi_valid > 0) { + put_byte(s, (Byte)s->bi_buf); + } + s->bi_buf = 0; + s->bi_valid = 0; +#ifdef ZLIB_DEBUG + s->bits_sent = (s->bits_sent+7) & ~7; +#endif +} diff --git a/deps/zlib-1.2.11/src/trees.h b/deps/zlib-1.2.11/src/trees.h new file mode 100644 index 000000000000..d35639d82a27 --- /dev/null +++ b/deps/zlib-1.2.11/src/trees.h @@ -0,0 +1,128 @@ +/* header created automatically with -DGEN_TREES_H */ + +local const ct_data static_ltree[L_CODES+2] = { +{{ 12},{ 8}}, {{140},{ 8}}, {{ 76},{ 8}}, {{204},{ 8}}, {{ 44},{ 8}}, +{{172},{ 8}}, {{108},{ 8}}, {{236},{ 8}}, {{ 28},{ 8}}, {{156},{ 8}}, +{{ 92},{ 8}}, {{220},{ 8}}, {{ 60},{ 8}}, {{188},{ 8}}, {{124},{ 8}}, +{{252},{ 8}}, {{ 2},{ 8}}, {{130},{ 8}}, {{ 66},{ 8}}, {{194},{ 8}}, +{{ 34},{ 8}}, {{162},{ 8}}, {{ 98},{ 8}}, {{226},{ 8}}, {{ 18},{ 8}}, +{{146},{ 8}}, {{ 82},{ 8}}, {{210},{ 8}}, {{ 50},{ 8}}, {{178},{ 8}}, +{{114},{ 8}}, {{242},{ 8}}, {{ 10},{ 8}}, {{138},{ 8}}, {{ 74},{ 8}}, +{{202},{ 8}}, {{ 42},{ 8}}, {{170},{ 8}}, {{106},{ 8}}, {{234},{ 8}}, +{{ 26},{ 8}}, {{154},{ 8}}, {{ 90},{ 8}}, {{218},{ 8}}, {{ 58},{ 8}}, +{{186},{ 8}}, {{122},{ 8}}, {{250},{ 8}}, {{ 6},{ 8}}, {{134},{ 8}}, +{{ 70},{ 8}}, {{198},{ 8}}, {{ 38},{ 8}}, {{166},{ 8}}, {{102},{ 8}}, +{{230},{ 8}}, {{ 22},{ 8}}, {{150},{ 8}}, {{ 86},{ 8}}, {{214},{ 8}}, +{{ 54},{ 8}}, {{182},{ 8}}, {{118},{ 8}}, {{246},{ 8}}, {{ 14},{ 8}}, +{{142},{ 8}}, {{ 78},{ 8}}, {{206},{ 8}}, {{ 46},{ 8}}, {{174},{ 8}}, +{{110},{ 8}}, {{238},{ 8}}, {{ 30},{ 8}}, {{158},{ 8}}, {{ 94},{ 8}}, +{{222},{ 8}}, {{ 62},{ 8}}, {{190},{ 8}}, {{126},{ 8}}, {{254},{ 8}}, +{{ 1},{ 8}}, {{129},{ 8}}, {{ 65},{ 8}}, {{193},{ 8}}, {{ 33},{ 8}}, +{{161},{ 8}}, {{ 97},{ 8}}, {{225},{ 8}}, {{ 17},{ 8}}, {{145},{ 8}}, +{{ 81},{ 8}}, {{209},{ 8}}, {{ 49},{ 8}}, {{177},{ 8}}, {{113},{ 8}}, +{{241},{ 8}}, {{ 9},{ 8}}, {{137},{ 8}}, {{ 73},{ 8}}, {{201},{ 8}}, +{{ 41},{ 8}}, {{169},{ 8}}, {{105},{ 8}}, {{233},{ 8}}, {{ 25},{ 8}}, +{{153},{ 8}}, {{ 89},{ 8}}, {{217},{ 8}}, {{ 57},{ 8}}, {{185},{ 8}}, +{{121},{ 8}}, {{249},{ 8}}, {{ 5},{ 8}}, {{133},{ 8}}, {{ 69},{ 8}}, +{{197},{ 8}}, {{ 37},{ 8}}, {{165},{ 8}}, {{101},{ 8}}, {{229},{ 8}}, +{{ 21},{ 8}}, {{149},{ 8}}, {{ 85},{ 8}}, {{213},{ 8}}, {{ 53},{ 8}}, +{{181},{ 8}}, {{117},{ 8}}, {{245},{ 8}}, {{ 13},{ 8}}, {{141},{ 8}}, +{{ 77},{ 8}}, {{205},{ 8}}, {{ 45},{ 8}}, {{173},{ 8}}, {{109},{ 8}}, +{{237},{ 8}}, {{ 29},{ 8}}, 
{{157},{ 8}}, {{ 93},{ 8}}, {{221},{ 8}}, +{{ 61},{ 8}}, {{189},{ 8}}, {{125},{ 8}}, {{253},{ 8}}, {{ 19},{ 9}}, +{{275},{ 9}}, {{147},{ 9}}, {{403},{ 9}}, {{ 83},{ 9}}, {{339},{ 9}}, +{{211},{ 9}}, {{467},{ 9}}, {{ 51},{ 9}}, {{307},{ 9}}, {{179},{ 9}}, +{{435},{ 9}}, {{115},{ 9}}, {{371},{ 9}}, {{243},{ 9}}, {{499},{ 9}}, +{{ 11},{ 9}}, {{267},{ 9}}, {{139},{ 9}}, {{395},{ 9}}, {{ 75},{ 9}}, +{{331},{ 9}}, {{203},{ 9}}, {{459},{ 9}}, {{ 43},{ 9}}, {{299},{ 9}}, +{{171},{ 9}}, {{427},{ 9}}, {{107},{ 9}}, {{363},{ 9}}, {{235},{ 9}}, +{{491},{ 9}}, {{ 27},{ 9}}, {{283},{ 9}}, {{155},{ 9}}, {{411},{ 9}}, +{{ 91},{ 9}}, {{347},{ 9}}, {{219},{ 9}}, {{475},{ 9}}, {{ 59},{ 9}}, +{{315},{ 9}}, {{187},{ 9}}, {{443},{ 9}}, {{123},{ 9}}, {{379},{ 9}}, +{{251},{ 9}}, {{507},{ 9}}, {{ 7},{ 9}}, {{263},{ 9}}, {{135},{ 9}}, +{{391},{ 9}}, {{ 71},{ 9}}, {{327},{ 9}}, {{199},{ 9}}, {{455},{ 9}}, +{{ 39},{ 9}}, {{295},{ 9}}, {{167},{ 9}}, {{423},{ 9}}, {{103},{ 9}}, +{{359},{ 9}}, {{231},{ 9}}, {{487},{ 9}}, {{ 23},{ 9}}, {{279},{ 9}}, +{{151},{ 9}}, {{407},{ 9}}, {{ 87},{ 9}}, {{343},{ 9}}, {{215},{ 9}}, +{{471},{ 9}}, {{ 55},{ 9}}, {{311},{ 9}}, {{183},{ 9}}, {{439},{ 9}}, +{{119},{ 9}}, {{375},{ 9}}, {{247},{ 9}}, {{503},{ 9}}, {{ 15},{ 9}}, +{{271},{ 9}}, {{143},{ 9}}, {{399},{ 9}}, {{ 79},{ 9}}, {{335},{ 9}}, +{{207},{ 9}}, {{463},{ 9}}, {{ 47},{ 9}}, {{303},{ 9}}, {{175},{ 9}}, +{{431},{ 9}}, {{111},{ 9}}, {{367},{ 9}}, {{239},{ 9}}, {{495},{ 9}}, +{{ 31},{ 9}}, {{287},{ 9}}, {{159},{ 9}}, {{415},{ 9}}, {{ 95},{ 9}}, +{{351},{ 9}}, {{223},{ 9}}, {{479},{ 9}}, {{ 63},{ 9}}, {{319},{ 9}}, +{{191},{ 9}}, {{447},{ 9}}, {{127},{ 9}}, {{383},{ 9}}, {{255},{ 9}}, +{{511},{ 9}}, {{ 0},{ 7}}, {{ 64},{ 7}}, {{ 32},{ 7}}, {{ 96},{ 7}}, +{{ 16},{ 7}}, {{ 80},{ 7}}, {{ 48},{ 7}}, {{112},{ 7}}, {{ 8},{ 7}}, +{{ 72},{ 7}}, {{ 40},{ 7}}, {{104},{ 7}}, {{ 24},{ 7}}, {{ 88},{ 7}}, +{{ 56},{ 7}}, {{120},{ 7}}, {{ 4},{ 7}}, {{ 68},{ 7}}, {{ 36},{ 7}}, +{{100},{ 7}}, {{ 20},{ 7}}, {{ 84},{ 7}}, {{ 52},{ 7}}, {{116},{ 7}}, +{{ 3},{ 8}}, {{131},{ 8}}, {{ 67},{ 8}}, {{195},{ 8}}, {{ 35},{ 8}}, +{{163},{ 8}}, {{ 99},{ 8}}, {{227},{ 8}} +}; + +local const ct_data static_dtree[D_CODES] = { +{{ 0},{ 5}}, {{16},{ 5}}, {{ 8},{ 5}}, {{24},{ 5}}, {{ 4},{ 5}}, +{{20},{ 5}}, {{12},{ 5}}, {{28},{ 5}}, {{ 2},{ 5}}, {{18},{ 5}}, +{{10},{ 5}}, {{26},{ 5}}, {{ 6},{ 5}}, {{22},{ 5}}, {{14},{ 5}}, +{{30},{ 5}}, {{ 1},{ 5}}, {{17},{ 5}}, {{ 9},{ 5}}, {{25},{ 5}}, +{{ 5},{ 5}}, {{21},{ 5}}, {{13},{ 5}}, {{29},{ 5}}, {{ 3},{ 5}}, +{{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}} +}; + +const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = { + 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, + 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, +10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, +11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, +12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, +13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, +13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, +14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, +14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, +14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, +15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, +15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 
15, 15, 15, 15, 15, 15, 15, 15, 15, +15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, +18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, +23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, +26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, +26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, +27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, +28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, +29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, +29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, +29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 +}; + +const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, +13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, +17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, +19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, +21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, +22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, +23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, +25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, +25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, +26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, +26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 +}; + +local const int base_length[LENGTH_CODES] = { +0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, +64, 80, 96, 112, 128, 160, 192, 224, 0 +}; + +local const int base_dist[D_CODES] = { + 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, + 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, + 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 +}; + diff --git a/deps/zlib-1.2.11/src/uncompr.c b/deps/zlib-1.2.11/src/uncompr.c new file mode 100644 index 000000000000..f03a1a865e34 --- /dev/null +++ b/deps/zlib-1.2.11/src/uncompr.c @@ -0,0 +1,93 @@ +/* uncompr.c -- decompress a memory buffer + * Copyright (C) 1995-2003, 2010, 2014, 2016 Jean-loup Gailly, Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#define ZLIB_INTERNAL +#include "zlib.h" + +/* =========================================================================== + Decompresses the source buffer into the destination buffer. *sourceLen is + the byte length of the source buffer. Upon entry, *destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. 
(The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, + *destLen is the size of the decompressed data and *sourceLen is the number + of source bytes consumed. Upon return, source + *sourceLen points to the + first unused input byte. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, or + Z_DATA_ERROR if the input data was corrupted, including if the input data is + an incomplete zlib stream. +*/ +int ZEXPORT uncompress2 (dest, destLen, source, sourceLen) + Bytef *dest; + uLongf *destLen; + const Bytef *source; + uLong *sourceLen; +{ + z_stream stream; + int err; + const uInt max = (uInt)-1; + uLong len, left; + Byte buf[1]; /* for detection of incomplete stream when *destLen == 0 */ + + len = *sourceLen; + if (*destLen) { + left = *destLen; + *destLen = 0; + } + else { + left = 1; + dest = buf; + } + + stream.next_in = (z_const Bytef *)source; + stream.avail_in = 0; + stream.zalloc = (alloc_func)0; + stream.zfree = (free_func)0; + stream.opaque = (voidpf)0; + + err = inflateInit(&stream); + if (err != Z_OK) return err; + + stream.next_out = dest; + stream.avail_out = 0; + + do { + if (stream.avail_out == 0) { + stream.avail_out = left > (uLong)max ? max : (uInt)left; + left -= stream.avail_out; + } + if (stream.avail_in == 0) { + stream.avail_in = len > (uLong)max ? max : (uInt)len; + len -= stream.avail_in; + } + err = inflate(&stream, Z_NO_FLUSH); + } while (err == Z_OK); + + *sourceLen -= len + stream.avail_in; + if (dest != buf) + *destLen = stream.total_out; + else if (stream.total_out && err == Z_BUF_ERROR) + left = 1; + + inflateEnd(&stream); + return err == Z_STREAM_END ? Z_OK : + err == Z_NEED_DICT ? Z_DATA_ERROR : + err == Z_BUF_ERROR && left + stream.avail_out ? Z_DATA_ERROR : + err; +} + +int ZEXPORT uncompress (dest, destLen, source, sourceLen) + Bytef *dest; + uLongf *destLen; + const Bytef *source; + uLong sourceLen; +{ + return uncompress2(dest, destLen, source, &sourceLen); +} diff --git a/deps/zlib-1.2.11/src/zconf.h b/deps/zlib-1.2.11/src/zconf.h new file mode 100644 index 000000000000..77398c11a1e2 --- /dev/null +++ b/deps/zlib-1.2.11/src/zconf.h @@ -0,0 +1,534 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". 
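The uncompress() entry point implemented in uncompr.c above pairs with compress(); a minimal round trip looks like this sketch (buffer sizes and the sample string are arbitrary):

    #include <stdio.h>
    #include <string.h>
    #include "zlib.h"

    int main(void) {
        const char *msg = "hello, hello, hello, hello";
        Bytef comp[256], plain[256];
        uLongf compLen = sizeof(comp), plainLen = sizeof(plain);

        if (compress(comp, &compLen, (const Bytef *)msg, strlen(msg) + 1) != Z_OK)
            return 1;
        if (uncompress(plain, &plainLen, comp, compLen) != Z_OK)
            return 1;
        printf("%lu -> %lu -> \"%s\"\n", (unsigned long)(strlen(msg) + 1),
               (unsigned long)compLen, (char *)plain);
        return 0;
    }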
+ */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols and init macros */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# define adler32_z z_adler32_z +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define crc32_z z_crc32_z +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateGetDictionary z_deflateGetDictionary +# define deflateInit z_deflateInit +# define deflateInit2 z_deflateInit2 +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzfread z_gzfread +# define gzfwrite z_gzfwrite +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzvprintf z_gzvprintf +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit z_inflateBackInit +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCodesUsed z_inflateCodesUsed +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetDictionary z_inflateGetDictionary +# define inflateGetHeader z_inflateGetHeader +# define inflateInit z_inflateInit +# define inflateInit2 z_inflateInit2 +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateResetKeep z_inflateResetKeep +# define inflateSetDictionary z_inflateSetDictionary +# define 
inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define inflateUndermine z_inflateUndermine +# define inflateValidate z_inflateValidate +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# define uncompress2 z_uncompress2 +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). + */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +#ifdef Z_SOLO + typedef unsigned long z_size_t; +#else +# define z_longlong long long +# if defined(NO_SIZE_T) + typedef unsigned NO_SIZE_T z_size_t; +# elif defined(STDC) +# include + typedef size_t z_size_t; +# else + typedef unsigned long z_size_t; +# endif +# undef z_longlong +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. 
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus about 7 kilobytes + for small objects. +*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. 
 */
+#    define ZEXPORT WINAPI
+#    ifdef WIN32
+#      define ZEXPORTVA WINAPIV
+#    else
+#      define ZEXPORTVA FAR CDECL
+#    endif
+#  endif
+#endif
+
+#if defined (__BEOS__)
+#  ifdef ZLIB_DLL
+#    ifdef ZLIB_INTERNAL
+#      define ZEXPORT __declspec(dllexport)
+#      define ZEXPORTVA __declspec(dllexport)
+#    else
+#      define ZEXPORT __declspec(dllimport)
+#      define ZEXPORTVA __declspec(dllimport)
+#    endif
+#  endif
+#endif
+
+#ifndef ZEXTERN
+#  define ZEXTERN extern
+#endif
+#ifndef ZEXPORT
+#  define ZEXPORT
+#endif
+#ifndef ZEXPORTVA
+#  define ZEXPORTVA
+#endif
+
+#ifndef FAR
+#  define FAR
+#endif
+
+#if !defined(__MACTYPES__)
+typedef unsigned char  Byte;  /* 8 bits */
+#endif
+typedef unsigned int   uInt;  /* 16 bits or more */
+typedef unsigned long  uLong; /* 32 bits or more */
+
+#ifdef SMALL_MEDIUM
+   /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */
+#  define Bytef Byte FAR
+#else
+   typedef Byte  FAR Bytef;
+#endif
+typedef char  FAR charf;
+typedef int   FAR intf;
+typedef uInt  FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+   typedef void const *voidpc;
+   typedef void FAR   *voidpf;
+   typedef void       *voidp;
+#else
+   typedef Byte const *voidpc;
+   typedef Byte FAR   *voidpf;
+   typedef Byte       *voidp;
+#endif
+
+#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC)
+#  include <limits.h>
+#  if (UINT_MAX == 0xffffffffUL)
+#    define Z_U4 unsigned
+#  elif (ULONG_MAX == 0xffffffffUL)
+#    define Z_U4 unsigned long
+#  elif (USHRT_MAX == 0xffffffffUL)
+#    define Z_U4 unsigned short
+#  endif
+#endif
+
+#ifdef Z_U4
+   typedef Z_U4 z_crc_t;
+#else
+   typedef unsigned long z_crc_t;
+#endif
+
+#if 1    /* was set to #if 1 by ./configure */
+#  define Z_HAVE_UNISTD_H
+#endif
+
+#if 1    /* was set to #if 1 by ./configure */
+#  define Z_HAVE_STDARG_H
+#endif
+
+#ifdef STDC
+#  ifndef Z_SOLO
+#    include <sys/types.h>      /* for off_t */
+#  endif
+#endif
+
+#if defined(STDC) || defined(Z_HAVE_STDARG_H)
+#  ifndef Z_SOLO
+#    include <stdarg.h>         /* for va_list */
+#  endif
+#endif
+
+#ifdef _WIN32
+#  ifndef Z_SOLO
+#    include <stddef.h>         /* for wchar_t */
+#  endif
+#endif
+
+/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and
+ * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even
+ * though the former does not conform to the LFS document), but considering
+ * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
+ * equivalently requesting no 64-bit operations
+ */
+#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1
+#  undef _LARGEFILE64_SOURCE
+#endif
+
+#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H)
+#  define Z_HAVE_UNISTD_H
+#endif
+#ifndef Z_SOLO
+#  if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
+#    include <unistd.h>         /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
+#    ifdef VMS
+#      include <unixio.h>       /* for off_t */
+#    endif
+#    ifndef z_off_t
+#      define z_off_t off_t
+#    endif
+#  endif
+#endif
+
+#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0
+#  define Z_LFS64
+#endif
+
+#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64)
+#  define Z_LARGE64
+#endif
+
+#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64)
+#  define Z_WANT64
+#endif
+
+#if !defined(SEEK_SET) && !defined(Z_SOLO)
+#  define SEEK_SET        0       /* Seek from beginning of file.  */
+#  define SEEK_CUR        1       /* Seek from current position.
*/ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff --git a/deps/zlib-1.2.11/src/zlib.h b/deps/zlib-1.2.11/src/zlib.h new file mode 100644 index 000000000000..f09cdaf1e054 --- /dev/null +++ b/deps/zlib-1.2.11/src/zlib.h @@ -0,0 +1,1912 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). +*/ + +#ifndef ZLIB_H +#define ZLIB_H + +#include "zconf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZLIB_VERSION "1.2.11" +#define ZLIB_VERNUM 0x12b0 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 2 +#define ZLIB_VER_REVISION 11 +#define ZLIB_VER_SUBREVISION 0 + +/* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. + + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output + (providing more output space) before each call. + + The compressed data format used by default by the in-memory functions is + the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped + around a deflate stream, which is itself documented in RFC 1951. 
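ZLIB_VERNUM above packs the version as 0xMNRS (major, minor, revision, subrevision), so 0x12b0 decodes to 1.2.11 with subrevision 0. A build can pin a minimum zlib with an ordinary preprocessor check (sketch):

    #include "zlib.h"

    #if ZLIB_VERNUM < 0x12b0
    #  error "zlib 1.2.11 or newer is required"
    #endif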
+ + The library also supports reading and writing files in gzip (.gz) format + with an interface similar to that of stdio using the functions that start + with "gz". The gzip format is different from the zlib format. gzip is a + gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. + + This library can optionally read and write gzip and raw deflate streams in + memory as well. + + The zlib format was designed to be compact and fast for use in memory + and on communications channels. The gzip format was designed for single- + file compression on file systems, has a larger header than zlib to maintain + directory information, and uses a different, slower check method than zlib. + + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in the case of corrupted input. +*/ + +typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); +typedef void (*free_func) OF((voidpf opaque, voidpf address)); + +struct internal_state; + +typedef struct z_stream_s { + z_const Bytef *next_in; /* next input byte */ + uInt avail_in; /* number of bytes available at next_in */ + uLong total_in; /* total number of input bytes read so far */ + + Bytef *next_out; /* next output byte will go here */ + uInt avail_out; /* remaining free space at next_out */ + uLong total_out; /* total number of bytes output so far */ + + z_const char *msg; /* last error message, NULL if no error */ + struct internal_state FAR *state; /* not visible by applications */ + + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree; /* used to free the internal state */ + voidpf opaque; /* private data object passed to zalloc and zfree */ + + int data_type; /* best guess about the data type: binary or text + for deflate, or the decoding state for inflate */ + uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */ + uLong reserved; /* reserved for future use */ +} z_stream; + +typedef z_stream FAR *z_streamp; + +/* + gzip header information passed to and from zlib routines. See RFC 1952 + for more details on the meanings of these fields. +*/ +typedef struct gz_header_s { + int text; /* true if compressed data believed to be text */ + uLong time; /* modification time */ + int xflags; /* extra flags (not used when writing a gzip file) */ + int os; /* operating system */ + Bytef *extra; /* pointer to extra field or Z_NULL if none */ + uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ + uInt extra_max; /* space at extra (only when reading header) */ + Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ + uInt name_max; /* space at name (only when reading header) */ + Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ + uInt comm_max; /* space at comment (only when reading header) */ + int hcrc; /* true if there was or will be a header crc */ + int done; /* true when done reading gzip header (not used + when writing a gzip file) */ +} gz_header; + +typedef gz_header FAR *gz_headerp; + +/* + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. 
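+
+  A minimal sketch (not part of the original zlib text) of the required
+  initialization just described, assuming the default allocators are wanted;
+  the helper name start_deflate is made up for illustration:
+
+      #include <zlib.h>
+
+      static int start_deflate(z_stream *strm, unsigned char *in, unsigned len)
+      {
+          strm->zalloc = Z_NULL;    // zlib falls back to malloc() and free()
+          strm->zfree  = Z_NULL;
+          strm->opaque = Z_NULL;
+          strm->next_in  = in;      // first chunk of input
+          strm->avail_in = len;
+          return deflateInit(strm, Z_DEFAULT_COMPRESSION);
+      }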
+ + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return Z_NULL if there is not enough memory for the object. + If zlib is used in a multi-threaded application, zalloc and zfree must be + thread safe. In that case, zlib is thread-safe. When zalloc and zfree are + Z_NULL on entry to the initialization function, they are set to internal + routines that use the standard library functions malloc() and free(). + + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use by the decompressor (particularly + if the decompressor wants to decompress everything in a single step). +*/ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 +#define Z_SYNC_FLUSH 2 +#define Z_FULL_FLUSH 3 +#define Z_FINISH 4 +#define Z_BLOCK 5 +#define Z_TREES 6 +/* Allowed flush values; see deflate() and inflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) +/* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_RLE 3 +#define Z_FIXED 4 +#define Z_DEFAULT_STRATEGY 0 +/* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_TEXT 1 +#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ +#define Z_UNKNOWN 2 +/* Possible values of the data_type field for deflate() */ + +#define Z_DEFLATED 8 +/* The deflate compression method (the only one supported in this version) */ + +#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ + +#define zlib_version zlibVersion() +/* for compatibility with versions < 1.0.2 */ + + + /* basic functions */ + +ZEXTERN const char * ZEXPORT zlibVersion OF((void)); +/* The application can compare zlibVersion and ZLIB_VERSION for consistency. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. + */ + +/* +ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. 
If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). + + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). +*/ + + +ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); +/* + deflate compresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. deflate performs one or both of the + following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary. Some output may be provided even if + flush is zero. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. See deflatePending(), + which can be used if desired to determine whether or not there is more ouput + in that case. + + Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to + decide how much data to accumulate before producing output, in order to + maximize compression. + + If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + flushed to the output buffer and the output is aligned on a byte boundary, so + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. 
All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. + This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed + codes block. + + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. It may need to wait for the next + block to be emitted. This is for advanced applications that need to control + the emission of deflate blocks. + + If flush is set to Z_FULL_FLUSH, all output is flushed as with + Z_SYNC_FLUSH, and the compression state is reset so that decompression can + restart from this point if previous compressed data has been damaged or if + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + compression. + + If deflate returns with avail_out == 0, this function must be called again + with the same value of the flush parameter and more output space (updated + avail_out), until the flush is complete (deflate returns with non-zero + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out is greater than six to avoid repeated flush markers due to + avail_out == 0 on return. + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this + function must be called again with Z_FINISH and more output space (updated + avail_out) but no more input data, until it returns with Z_STREAM_END or an + error. After deflate has returned Z_STREAM_END, the only possible operations + on the stream are deflateReset or deflateEnd. + + Z_FINISH can be used in the first deflate call after deflateInit if all the + compression is to be done in a single step. In order to complete in one + call, avail_out must be at least the value returned by deflateBound (see + below). Then deflate is guaranteed to return Z_STREAM_END. If not enough + output space is provided, deflate will not return Z_STREAM_END, and it must + be called again as described above. + + deflate() sets strm->adler to the Adler-32 checksum of all input read + so far (that is, total_in bytes). If a gzip stream is being generated, then + strm->adler will be the CRC-32 checksum of the input read so far. (See + deflateInit2 below.) + + deflate() may update strm->data_type if it can make a good guess about + the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is + considered binary. This field is only for information purposes and does not + affect the compression algorithm in any manner. 
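+
+  A rough sketch (not from the original text) of the loop implied above:
+  compress an in-memory buffer to a stdio file with a single Z_FINISH pass,
+  calling deflate() again while output is still pending.  The buffer size and
+  helper name are arbitrary assumptions:
+
+      #include <stdio.h>
+      #include <zlib.h>
+
+      static int deflate_to_file(unsigned char *buf, uInt len, FILE *dst)
+      {
+          unsigned char out[16384];
+          z_stream strm = {0};                       // Z_NULL allocators
+          int ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
+          if (ret != Z_OK) return ret;
+          strm.next_in  = buf;
+          strm.avail_in = len;
+          do {
+              strm.next_out  = out;
+              strm.avail_out = sizeof(out);
+              ret = deflate(&strm, Z_FINISH);        // Z_OK means more output pending
+              fwrite(out, 1, sizeof(out) - strm.avail_out, dst);
+          } while (ret == Z_OK);
+          deflateEnd(&strm);
+          return ret == Z_STREAM_END ? Z_OK : ret;
+      }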
+ + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was Z_NULL or the state was inadvertently written over + by the application), or Z_BUF_ERROR if no progress is possible (for example + avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and + deflate() can be called again with more input and more output space to + continue compressing. +*/ + + +ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be + deallocated). +*/ + + +/* +ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); + + Initializes the internal stream state for decompression. The fields + next_in, avail_in, zalloc, zfree and opaque must be initialized before by + the caller. In the current version of inflate, the provided input is not + read or consumed. The allocation of a sliding window will be deferred to + the first call of inflate (if the decompression does not complete on the + first call). If zalloc and zfree are set to Z_NULL, inflateInit updates + them to use default allocation functions. + + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression. + Actual decompression will be done by inflate(). So next_in, and avail_in, + next_out, and avail_out are unused and unchanged. The current + implementation of inflateInit() does not process any header information -- + that is deferred until inflate() is called. +*/ + + +ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); +/* + inflate decompresses as much data as possible, and stops when the input + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when + forced to flush. + + The detailed semantics are as follows. inflate performs one or both of the + following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), then next_in and avail_in are updated + accordingly, and processing will resume at this point for the next call of + inflate(). + + - Generate more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). 
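+
+  A rough sketch (not from the original text) of a typical inflate() loop
+  built from the two actions just described, for a complete zlib stream
+  already held in memory and with no preset dictionary; names and the 16K
+  output buffer are arbitrary assumptions:
+
+      #include <stdio.h>
+      #include <zlib.h>
+
+      static int inflate_to_file(unsigned char *buf, uInt len, FILE *dst)
+      {
+          unsigned char out[16384];
+          z_stream strm = {0};
+          int ret = inflateInit(&strm);
+          if (ret != Z_OK) return ret;
+          strm.next_in  = buf;
+          strm.avail_in = len;
+          do {
+              strm.next_out  = out;
+              strm.avail_out = sizeof(out);
+              ret = inflate(&strm, Z_NO_FLUSH);
+              if (ret != Z_OK && ret != Z_STREAM_END) break;   // data/mem/buf error
+              fwrite(out, 1, sizeof(out) - strm.avail_out, dst);
+          } while (ret != Z_STREAM_END);
+          inflateEnd(&strm);
+          return ret == Z_STREAM_END ? Z_OK : ret;
+      }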
+ + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. If the + caller of inflate() does not provide both available input and available + output space, it is possible that there will be no progress made. The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. + + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. + + The Z_BLOCK option assists in appending to or combining deflate streams. + To assist in this, on return inflate() always sets strm->data_type to the + number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. + + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. + + inflate() should normally be called until it returns Z_STREAM_END or an + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. 
If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. + + In this implementation, inflate() always flushes as much output as + possible to the output buffer, and always uses the faster approach on the + first call. So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. + + If a preset dictionary is needed after this call (see inflateSetDictionary + below), inflate sets strm->adler to the Adler-32 checksum of the dictionary + chosen by the compressor and returns Z_NEED_DICT; otherwise it sets + strm->adler to the Adler-32 checksum of all output produced so far (that is, + total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described + below. At the end of the stream, inflate() checks that its computed Adler-32 + checksum is equal to that saved by the compressor and returns Z_STREAM_END + only if the checksum is correct. + + inflate() can decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically, if requested when + initializing with inflateInit2(). Any information contained in the gzip + header is not retained unless inflateGetHeader() is used. When processing + gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output + produced so far. The CRC-32 is checked against the gzip trailer, as is the + uncompressed length, modulo 2^32. + + inflate() returns Z_OK if some progress has been made (more input processed + or more output produced), Z_STREAM_END if the end of the compressed data has + been reached and all uncompressed output has been produced, Z_NEED_DICT if a + preset dictionary is needed at this point, Z_DATA_ERROR if the input data was + corrupted (input stream not conforming to the zlib format or incorrect check + value, in which case strm->msg points to a string with a more specific + error), Z_STREAM_ERROR if the stream structure was inconsistent (for example + next_in or next_out was Z_NULL, or the state was inadvertently written over + by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR + if no progress was possible or if there was not enough room in the output + buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + inflate() can be called again with more input and more output space to + continue decompressing. If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is to be attempted. +*/ + + +ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any pending + output. + + inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state + was inconsistent. +*/ + + + /* Advanced functions */ + +/* + The following functions are needed only in some special applications. +*/ + +/* +ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy)); + + This is another version of deflateInit with more compression options. 
The + fields next_in, zalloc, zfree and opaque must be initialized before by the + caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. + + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if + deflateInit is used instead. + + For the current implementation of deflate(), a windowBits value of 8 (a + window size of 256 bytes) is not supported. As a result, a request for 8 + will result in 9 (a 512-byte window). In that case, providing 8 to + inflateInit2() will result in an error when the zlib header with 9 is + checked against the initialization of inflate(). The remedy is to not use 8 + with deflateInit2() with this initialization, or at least in that case use 9 + with inflateInit2(). + + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data + with no zlib header or trailer, and will not compute a check value. + + windowBits can also be greater than 15 for optional gzip encoding. Add + 16 to windowBits to write a simple gzip header and trailer around the + compressed data instead of a zlib wrapper. The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to the appropriate value, + if the operating system was determined at compile time. If a gzip stream is + being written, strm->adler is a CRC-32 instead of an Adler-32. + + For raw deflate or gzip encoding, a request for a 256-byte window is + rejected as invalid, since only the zlib header provides a means of + transmitting the window size to the decompressor. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. + + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match), or Z_RLE to limit match distances to one (run-length + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman + coding and less string matching; it is somewhat intermediate between + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. 
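+
+  An illustrative call (not from the original text) using the windowBits
+  conventions above: add 16 to request a gzip wrapper, or pass a negative
+  value for raw deflate.  The helper name is made up:
+
+      #include <string.h>
+      #include <zlib.h>
+
+      static int open_gzip_deflate(z_stream *strm, int level)
+      {
+          memset(strm, 0, sizeof(*strm));            // Z_NULL allocators
+          // Passing -15 instead of 15 + 16 would request raw deflate with no
+          // header, trailer, or check value.
+          return deflateInit2(strm, level, Z_DEFLATED,
+                              15 + 16,               // 32K window plus gzip wrapper
+                              8,                     // default memLevel
+                              Z_DEFAULT_STRATEGY);
+      }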
+ + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. deflateInit2 does not perform any + compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the compression dictionary from the given byte sequence + without producing any compressed output. When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). + + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary. Using a + dictionary is most useful when the data to be compressed is short and can be + predicted with good accuracy; the data can then be compressed better than + with the default empty dictionary. + + Depending on the size of the compression data structures selected by + deflateInit or deflateInit2, a part of the dictionary may in effect be + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. + + Upon return of this function, strm->adler is set to the Adler-32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The Adler-32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) If a raw deflate was requested, then the + Adler-32 value is not computed and strm->adler is not set. + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent (for example if deflate has already been called for this stream + or if not at a block boundary for raw deflate). deflateSetDictionary does + not perform any compression: this will be done by deflate(). +*/ + +ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm, + Bytef *dictionary, + uInt *dictLength)); +/* + Returns the sliding dictionary being maintained by deflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If deflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similary, if dictLength is Z_NULL, then it is not set. 
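+
+  A small sketch (not from the original text) of installing a preset
+  dictionary right after deflateInit, as deflateSetDictionary() above
+  requires; the dictionary contents here are a made-up example:
+
+      #include <string.h>
+      #include <zlib.h>
+
+      static const char dict[] = "temperature humidity voltage current ";
+
+      static int start_deflate_with_dict(z_stream *strm)
+      {
+          int ret;
+          memset(strm, 0, sizeof(*strm));
+          ret = deflateInit(strm, Z_DEFAULT_COMPRESSION);
+          if (ret != Z_OK) return ret;
+          // strm->adler is then set to the dictionary's Adler-32 so the
+          // decompressor can later pick the matching dictionary.
+          return deflateSetDictionary(strm, (const Bytef *)dict,
+                                      (uInt)strlen(dict));
+      }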
+ + deflateGetDictionary() may return a length less than the window size, even + when more than the window size in input has been provided. It may return up + to 258 bytes less in that case, due to how zlib's implementation of deflate + manages the sliding window and lookahead for matches, where matches can be + up to 258 bytes long. If the application needs the last window-size bytes of + input, then that would need to be saved by the application outside of zlib. + + deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + +ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. + + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); +/* + This function is equivalent to deflateEnd followed by deflateInit, but + does not free and reallocate the internal compression state. The stream + will leave the compression level and any other attributes that may have been + set unchanged. + + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, + int level, + int strategy)); +/* + Dynamically update the compression level and compression strategy. The + interpretation of level and strategy is as in deflateInit2(). This can be + used to switch between compression and straight copy of the input data, or + to switch to a different kind of input data requiring a different strategy. + If the compression approach (which is a function of the level) or the + strategy is changed, and if any input has been consumed in a previous + deflate() call, then the input available so far is compressed with the old + level and strategy using deflate(strm, Z_BLOCK). There are three approaches + for the compression levels 0, 1..3, and 4..9 respectively. The new level + and strategy will take effect at the next call of deflate(). + + If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does + not have enough output space to complete, then the parameter change will not + take effect. In this case, deflateParams() can be called again with the + same parameters and more output space to try again. + + In order to assure a change in the parameters on the first try, the + deflate stream should be flushed using deflate() with Z_BLOCK or other flush + request until strm.avail_out is not zero, before calling deflateParams(). + Then no more input data should be provided before the deflateParams() call. + If this is done, the old level and strategy will be applied to the data + compressed before deflateParams(), and the new level and strategy will be + applied to the the data compressed after deflateParams(). 
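+
+  A sketch (not from the original text) of the sequence recommended above for
+  changing parameters mid-stream: drain pending output with Z_BLOCK until
+  avail_out is non-zero, then call deflateParams().  The consume() callback is
+  an assumption standing in for whatever writes the output:
+
+      #include <zlib.h>
+
+      static int switch_level(z_stream *strm, int new_level,
+                              unsigned char *out, unsigned out_size,
+                              void (*consume)(unsigned char *, unsigned))
+      {
+          do {
+              strm->next_out  = out;
+              strm->avail_out = out_size;
+              if (deflate(strm, Z_BLOCK) == Z_STREAM_ERROR)
+                  return Z_STREAM_ERROR;
+              consume(out, out_size - strm->avail_out);
+          } while (strm->avail_out == 0);            // flushed; change can take effect
+          return deflateParams(strm, new_level, Z_DEFAULT_STRATEGY);
+      }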
+ + deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream + state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if + there was not enough output space to complete the compression of the + available input data before a change in the strategy or approach. Note that + in the case of a Z_BUF_ERROR, the parameters are not changed. A return + value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be + retried with more output space. +*/ + +ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, + int good_length, + int max_lazy, + int nice_length, + int max_chain)); +/* + Fine tune deflate's internal compression parameters. This should only be + used by someone who understands the algorithm used by zlib's deflate for + searching for the best matching string, and even then only by the most + fanatic optimizer trying to squeeze out the last compressed bit for their + specific input data. Read the deflate.c source code for the meaning of the + max_lazy, good_length, nice_length, and max_chain parameters. + + deflateTune() can be called after deflateInit() or deflateInit2(), and + returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. + */ + +ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, + uLong sourceLen)); +/* + deflateBound() returns an upper bound on the compressed size after + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. +*/ + +ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, + unsigned *pending, + int *bits)); +/* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. + + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + +ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + deflatePrime() inserts bits in the deflate output stream. The intent + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. + + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. 
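+
+  A sketch (not from the original text) of the single-pass pattern that
+  deflateBound() above is meant for: size the output buffer with the bound,
+  so one deflate() call with Z_FINISH returns Z_STREAM_END.  The allocation
+  strategy and helper name are assumptions:
+
+      #include <stdlib.h>
+      #include <zlib.h>
+
+      static int compress_once(z_stream *strm, unsigned char *src, uLong len,
+                               unsigned char **dst, uLong *dst_len)
+      {
+          uLong bound = deflateBound(strm, len);     // after deflateInit()/deflateSetHeader()
+          *dst = (unsigned char *)malloc(bound);
+          if (*dst == NULL) return Z_MEM_ERROR;
+          strm->next_in   = src;
+          strm->avail_in  = (uInt)len;
+          strm->next_out  = *dst;
+          strm->avail_out = (uInt)bound;
+          if (deflate(strm, Z_FINISH) != Z_STREAM_END) {
+              free(*dst);
+              return Z_BUF_ERROR;
+          }
+          *dst_len = bound - strm->avail_out;        // actual compressed size
+          return Z_OK;
+      }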
+*/ + +ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, + gz_headerp head)); +/* + deflateSetHeader() provides gzip header information for when a gzip + stream is requested by deflateInit2(). deflateSetHeader() may be called + after deflateInit2() or deflateReset() and before the first call of + deflate(). The text, time, os, extra field, name, and comment information + in the provided gz_header structure are written to the gzip header (xflag is + ignored -- the extra flags are set according to the compression level). The + caller must assure that, if not Z_NULL, name and comment are terminated with + a zero byte, and that if extra is not Z_NULL, that extra_len bytes are + available there. If hcrc is true, a gzip header crc is included. Note that + the current versions of the command-line version of gzip (up through version + 1.3.x) do not support header crc's, and will report that it is a "multi-part + gzip file" and give up. + + If deflateSetHeader is not used, the default gzip header has text false, + the time set to zero, and os set to 255, with no extra, name, or comment + fields. The gzip header is returned to the default state by deflateReset(). + + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, + int windowBits)); + + This is another version of inflateInit with an extra parameter. The + fields next_in, avail_in, zalloc, zfree and opaque must be initialized + before by the caller. + + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). It should be in the range 8..15 for + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value + provided to deflateInit2() while compressing, or it must be equal to 15 if + deflateInit2() was not used. If a compressed stream with a larger window + size is given as input, inflate() will return with the error code + Z_DATA_ERROR instead of trying to allocate a larger window. + + windowBits can also be zero to request that inflate use the window size in + the zlib header of the compressed stream. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. inflate() will then process raw deflate data, + not looking for a zlib or gzip header, not generating a check value, and not + looking for any check values for comparison at the end of the stream. This + is for use with other formats that use the deflate compressed data format + such as zip. Those formats provide their own check values. If a custom + format is developed using the raw deflate format for compressed data, it is + recommended that a check value such as an Adler-32 or a CRC-32 be applied to + the uncompressed data as is done in the zlib, gzip, and zip formats. For + most applications, the zlib format should be used as is. Note that comments + above on the use in deflateInit2() applies to the magnitude of windowBits. + + windowBits can also be greater than 15 for optional gzip decoding. Add + 32 to windowBits to enable zlib and gzip decoding with automatic header + detection, or add 16 to decode only the gzip format (the zlib format will + return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a + CRC-32 instead of an Adler-32. 
Unlike the gunzip utility and gzread() (see + below), inflate() will not automatically decode concatenated gzip streams. + inflate() will return Z_STREAM_END at the end of the gzip stream. The state + would need to be reset to continue decoding a subsequent gzip stream. + + inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit2 does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit2() does not process any header information -- that is + deferred until inflate() is called. +*/ + +ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the decompression dictionary from the given uncompressed byte + sequence. This function must be called immediately after a call of inflate, + if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + can be determined from the Adler-32 value returned by that call of inflate. + The compressor and decompressor must use exactly the same dictionary (see + deflateSetDictionary). For raw inflate, this function can be called at any + time to set the dictionary. If the provided dictionary is smaller than the + window and there is already data in the window, then the provided dictionary + will amend what's there. The application must insure that the dictionary + that was used for compression is provided. + + inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is + inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the + expected one (incorrect Adler-32 value). inflateSetDictionary does not + perform any decompression: this will be done by subsequent calls of + inflate(). +*/ + +ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, + Bytef *dictionary, + uInt *dictLength)); +/* + Returns the sliding dictionary being maintained by inflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If inflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similary, if dictLength is Z_NULL, then it is not set. + + inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + +ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); +/* + Skips invalid compressed data until a possible full flush point (see above + for the description of deflate with Z_FULL_FLUSH) can be found, or until all + available input is skipped. No output is provided. + + inflateSync searches for a 00 00 FF FF pattern in the compressed data. + All full flush points have this pattern, but not all occurrences of this + pattern are full flush points. 
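+
+  A sketch (not from the original text) of the Z_NEED_DICT protocol described
+  for inflateSetDictionary() above; dict must be the same byte sequence the
+  compressor used:
+
+      #include <zlib.h>
+
+      static int inflate_step_with_dict(z_stream *strm,
+                                        const Bytef *dict, uInt dict_len)
+      {
+          int ret = inflate(strm, Z_NO_FLUSH);
+          if (ret == Z_NEED_DICT) {
+              // strm->adler identifies the dictionary the compressor chose.
+              ret = inflateSetDictionary(strm, dict, dict_len);
+              if (ret == Z_OK)
+                  ret = inflate(strm, Z_NO_FLUSH);   // resume decompression
+          }
+          return ret;
+      }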
+ + inflateSync returns Z_OK if a possible full flush point has been found, + Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point + has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. + In the success case, the application may save the current current value of + total_in which indicates where valid compressed data was found. In the + error case, the application may repeatedly call inflateSync, providing more + input each time, until success or end of the input data. +*/ + +ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. + + This function can be useful when randomly accessing a large stream. The + first pass through the stream can periodically record the inflate state, + allowing restarting inflate at those points when randomly accessing the + stream. + + inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being Z_NULL). msg is left unchanged in both source and + destination. +*/ + +ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); +/* + This function is equivalent to inflateEnd followed by inflateInit, + but does not free and reallocate the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. + + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, + int windowBits)); +/* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. If the window size is changed, then the + memory allocated for the window is freed, and the window will be reallocated + by inflate() if needed. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. +*/ + +ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, + int bits, + int value)); +/* + This function inserts bits in the inflate input stream. The intent is + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. + + If bits is negative, then the input stream bit buffer is emptied. Then + inflatePrime() can be called again to put bits in the buffer. This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. + + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); +/* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. 
+ If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. + + inflateMark returns the value noted above, or -65536 if the provided + source stream state was inconsistent. +*/ + +ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, + gz_headerp head)); +/* + inflateGetHeader() requests that gzip header information be stored in the + provided gz_header structure. inflateGetHeader() may be called after + inflateInit2() or inflateReset(), and before the first call of inflate(). + As inflate() processes the gzip stream, head->done is zero until the header + is completed, at which time head->done is set to one. If a zlib stream is + being decoded, then head->done is set to -1 to indicate that there will be + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. + + The text, time, xflags, and os fields are filled in with the gzip header + contents. hcrc is set to true if there is a header CRC. (The header CRC + was valid if done is set to one.) If extra is not Z_NULL, then extra_max + contains the maximum number of bytes to write to extra. Once done is true, + extra_len contains the actual extra field length, and extra contains the + extra field, or that field truncated if extra_max is less than extra_len. + If name is not Z_NULL, then up to name_max characters are written there, + terminated with a zero unless the length is greater than name_max. If + comment is not Z_NULL, then up to comm_max characters are written there, + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its + absence. This allows the use of deflateSetHeader() with the returned + structure to duplicate the header. However if those fields are set to + allocated memory, then the application will need to save those pointers + elsewhere so that they can be eventually freed. + + If inflateGetHeader is not used, then the header information is simply + discarded. The header is always checked for validity, including the header + CRC if present. inflateReset() will reset the process to discard the header + information. The application would need to call inflateGetHeader() again to + retrieve the header from the next gzip stream. 
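+
+  A sketch (not from the original text) of requesting the gzip header fields
+  described above before inflating; the buffer sizes are arbitrary
+  assumptions:
+
+      #include <string.h>
+      #include <zlib.h>
+
+      static gz_header head;
+      static Bytef extra_buf[1024], name_buf[256], comment_buf[256];
+
+      static int start_gunzip(z_stream *strm)
+      {
+          int ret;
+          memset(strm, 0, sizeof(*strm));
+          ret = inflateInit2(strm, 15 + 32);         // auto-detect zlib or gzip
+          if (ret != Z_OK) return ret;
+          head.extra   = extra_buf;   head.extra_max = sizeof(extra_buf);
+          head.name    = name_buf;    head.name_max  = sizeof(name_buf);
+          head.comment = comment_buf; head.comm_max  = sizeof(comment_buf);
+          return inflateGetHeader(strm, &head);      // head.done becomes 1 after the header
+      }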
+ + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. +*/ + +/* +ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, + unsigned char FAR *window)); + + Initialize the internal stream state for decompression using inflateBack() + calls. The fields zalloc, zfree and opaque in strm must be initialized + before the call. If zalloc and zfree are Z_NULL, then the default library- + derived memory allocation routines are used. windowBits is the base two + logarithm of the window size, in the range 8..15. window is a caller + supplied buffer of that size. Except for special applications where it is + assured that deflate was used with small window sizes, windowBits must be 15 + and a 32K byte window must be supplied to be able to decompress general + deflate streams. + + See inflateBack() for the usage of these routines. + + inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. +*/ + +typedef unsigned (*in_func) OF((void FAR *, + z_const unsigned char FAR * FAR *)); +typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); + +ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, + in_func in, void FAR *in_desc, + out_func out, void FAR *out_desc)); +/* + inflateBack() does a raw inflate with a single call using a call-back + interface for input and output. This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. + + inflateBackInit() must be called first to allocate the internal state + and to initialize the state with the user-provided window buffer. + inflateBack() may then be used multiple times to inflate a complete, raw + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. + + A raw deflate stream is one with no zlib or gzip header or trailer. + This routine would normally be used in a utility that reads zip or gzip + files and writes out uncompressed files. The utility would decode the + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. This is different from the default + behavior of inflate(), which expects a zlib header and trailer around the + deflate stream. + + inflateBack() uses two subroutines supplied by the caller that are then + called by inflateBack() for input and output. inflateBack() calls those + routines until it reads a complete deflate stream and writes out all of the + uncompressed data, or until it encounters an error. The function's + parameters and return types are defined above in the in_func and out_func + typedefs. inflateBack() will call in(in_desc, &buf) which should return the + number of bytes of provided input, and a pointer to that input in buf. If + there is no input available, in() must return zero -- buf is ignored in that + case -- and inflateBack() will return a buffer error. inflateBack() will + call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. 
+ out() should return zero on success, or non-zero on failure. If out() + returns non-zero, inflateBack() will return with an error. Neither in() nor + out() are permitted to change the contents of the window provided to + inflateBackInit(), which is also the buffer that out() uses to write from. + The length written by out() will be at most the window size. Any non-zero + amount of input may be provided by in(). + + For convenience, inflateBack() can be provided input on the first call by + setting strm->next_in and strm->avail_in. If that input is exhausted, then + in() will be called. Therefore strm->next_in must be initialized before + calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called + immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in + must also be initialized, and then if strm->avail_in is not zero, input will + initially be taken from strm->next_in[0 .. strm->avail_in - 1]. + + The in_desc and out_desc parameters of inflateBack() is passed as the + first parameter of in() and out() respectively when they are called. These + descriptors can be optionally used to pass any information that the caller- + supplied in() and out() functions need to do their job. + + On return, inflateBack() will set strm->next_in and strm->avail_in to + pass back any unused input that was provided by the last in() call. The + return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR + if in() or out() returned an error, Z_DATA_ERROR if there was a format error + in the deflate stream (in which case strm->msg is set to indicate the nature + of the error), or Z_STREAM_ERROR if the stream was not properly initialized. + In the case of Z_BUF_ERROR, an input or output error can be distinguished + using strm->next_in which will be Z_NULL only if in() returned an error. If + strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning + non-zero. (in() will always be called before out(), so strm->next_in is + assured to be defined if out() returns non-zero.) Note that inflateBack() + cannot return Z_OK. +*/ + +ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); +/* + All memory allocated by inflateBackInit() is freed. + + inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream + state was inconsistent. +*/ + +ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void)); +/* Return flags indicating compile-time options. 
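+
+  Looking back at the inflateBack() callback interface described above, a
+  sketch (not from the original text) of file-to-file raw inflate; the
+  stdio-based callbacks and names are assumptions:
+
+      #include <stdio.h>
+      #include <zlib.h>
+
+      static unsigned char in_buf[16384];
+
+      static unsigned pull(void *desc, z_const unsigned char **buf)
+      {
+          *buf = in_buf;
+          return (unsigned)fread(in_buf, 1, sizeof(in_buf), (FILE *)desc);
+      }
+
+      static int push(void *desc, unsigned char *buf, unsigned len)
+      {
+          return fwrite(buf, 1, len, (FILE *)desc) != len;   // non-zero on error
+      }
+
+      static int raw_inflate_file(FILE *src, FILE *dst)
+      {
+          unsigned char window[32768];               // 32K window for general streams
+          z_stream strm = {0};
+          int ret = inflateBackInit(&strm, 15, window);
+          if (ret != Z_OK) return ret;
+          strm.next_in  = Z_NULL;                    // in() supplies all input
+          strm.avail_in = 0;
+          ret = inflateBack(&strm, pull, src, push, dst);
+          inflateBackEnd(&strm);
+          return ret == Z_STREAM_END ? Z_OK : ret;
+      }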
+ + Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: + 1.0: size of uInt + 3.2: size of uLong + 5.4: size of voidpf (pointer) + 7.6: size of z_off_t + + Compiler, assembler, and debug options: + 8: ZLIB_DEBUG + 9: ASMV or ASMINF -- use ASM code + 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention + 11: 0 (reserved) + + One-time table building (smaller code, but not thread-safe if true): + 12: BUILDFIXED -- build static block decoding tables when needed + 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed + 14,15: 0 (reserved) + + Library content (indicates missing functionality): + 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking + deflate code when not needed) + 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect + and decode gzip streams (to avoid linking crc code) + 18-19: 0 (reserved) + + Operation variations (changes in library functionality): + 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate + 21: FASTEST -- deflate algorithm with only one, lowest compression level + 22,23: 0 (reserved) + + The sprintf variant used by gzprintf (zero is best): + 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format + 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! + 26: 0 = returns value, 1 = void -- 1 means inferred string length returned + + Remainder: + 27-31: 0 (reserved) + */ + +#ifndef Z_SOLO + + /* utility functions */ + +/* + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. +*/ + +ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Compresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed data. compress() is equivalent to compress2() with a level + parameter of Z_DEFAULT_COMPRESSION. + + compress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer. +*/ + +ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen, + int level)); +/* + Compresses the source buffer into the destination buffer. The level + parameter has the same meaning as in deflateInit. sourceLen is the byte + length of the source buffer. Upon entry, destLen is the total size of the + destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressed data. + + compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_BUF_ERROR if there was not enough room in the output buffer, + Z_STREAM_ERROR if the level parameter is invalid. +*/ + +ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); +/* + compressBound() returns an upper bound on the compressed size after + compress() or compress2() on sourceLen bytes. It would be used before a + compress() or compress2() call to allocate the destination buffer. 
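
   Editor's note -- an illustrative sketch, not part of the vendored zlib
   header: a whole-buffer round trip with compressBound(), compress() and
   uncompress() (the latter is documented just below).  The variable names
   and sample payload are invented; <zlib.h>, <stdlib.h> and <string.h>
   are assumed to be included.

     unsigned char src[] = "an example payload, an example payload";
     uLong srcLen = (uLong)sizeof(src);
     uLongf dstLen = compressBound(srcLen);
     Bytef *dst = (Bytef *)malloc(dstLen);
     int ok = 0;
     if (dst != Z_NULL && compress(dst, &dstLen, src, srcLen) == Z_OK) {
         unsigned char back[sizeof(src)];
         uLongf backLen = (uLongf)sizeof(back);
         ok = uncompress(back, &backLen, dst, dstLen) == Z_OK &&
              backLen == srcLen && memcmp(back, src, srcLen) == 0;
     }
     free(dst);

   After the calls, dstLen holds the compressed size and backLen the
   decompressed size; ok is 1 only if the data survived the round trip.
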
+*/ + +ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); +/* + Decompresses the source buffer into the destination buffer. sourceLen is + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. (The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, destLen + is the actual size of the uncompressed data. + + uncompress returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_BUF_ERROR if there was not enough room in the output + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. +*/ + +ZEXTERN int ZEXPORT uncompress2 OF((Bytef *dest, uLongf *destLen, + const Bytef *source, uLong *sourceLen)); +/* + Same as uncompress, except that sourceLen is a pointer, where the + length of the source is *sourceLen. On return, *sourceLen is the number of + source bytes consumed. +*/ + + /* gzip file access functions */ + +/* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. +*/ + +typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ + +/* +ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); + + Opens a gzip (.gz) file for reading or writing. The mode parameter is as + in fopen ("rb" or "wb") but can also include a compression level ("wb9") or + a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only + compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' + for fixed code compression as in "wb9F". (See the description of + deflateInit2 for more information about the strategy parameter.) 'T' will + request transparent writing or appending with no compression and not using + the gzip format. + + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. + + gzopen can be used to read a file which is not in gzip format; in this + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. 
+ + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. +*/ + +ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); +/* + gzdopen associates a gzFile with the file descriptor fd. File descriptors + are obtained from calls like open, dup, creat, pipe or fileno (if the file + has been previously opened with fopen). The mode parameter is as in gzopen. + + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). +*/ + +ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); +/* + Set the internal buffer size used by this library's functions. The + default buffer size is 8192 bytes. This function must be called after + gzopen() or gzdopen(), and before any other calls that read or write the + file. The buffer memory allocation is always deferred to the first read or + write. Three times that size in buffer space is allocated. A larger buffer + size of, for example, 64K or 128K bytes will noticeably increase the speed + of decompression (reading). + + The new buffer size also affects the maximum length for gzprintf(). + + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. +*/ + +ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); +/* + Dynamically update the compression level or strategy. See the description + of deflateInit2 for the meaning of these parameters. Previously provided + data is flushed before the parameter change. + + gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not + opened for writing, Z_ERRNO if there is an error writing the flushed data, + or Z_MEM_ERROR if there is a memory allocation error. +*/ + +ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); +/* + Reads the given number of uncompressed bytes from the compressed file. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. + + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). + If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). 
+
+ gzread can be used to read a gzip file that is being concurrently written.
+ Upon reaching the end of the input, gzread will return with the available
+ data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then
+ gzclearerr can be used to clear the end of file indicator in order to permit
+ gzread to be tried again. Z_OK indicates that a gzip stream was completed
+ on the last gzread. Z_BUF_ERROR indicates that the input file ended in the
+ middle of a gzip stream. Note that gzread does not return -1 in the event
+ of an incomplete gzip stream. This error is deferred until gzclose(), which
+ will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip
+ stream. Alternatively, gzerror can be used before gzclose to detect this
+ case.
+
+ gzread returns the number of uncompressed bytes actually read, less than
+ len for end of file, or -1 for error. If len is too large to fit in an int,
+ then nothing is read, -1 is returned, and the error state is set to
+ Z_STREAM_ERROR.
+*/
+
+ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems,
+ gzFile file));
+/*
+ Read up to nitems items of size size from file to buf, otherwise operating
+ as gzread() does. This duplicates the interface of stdio's fread(), with
+ size_t request and return types. If the library defines size_t, then
+ z_size_t is identical to size_t. If not, then z_size_t is an unsigned
+ integer type that can contain a pointer.
+
+ gzfread() returns the number of full items read of size size, or zero if
+ the end of the file was reached and a full item could not be read, or if
+ there was an error. gzerror() must be consulted if zero is returned in
+ order to determine if there was an error. If the multiplication of size and
+ nitems overflows, i.e. the product does not fit in a z_size_t, then nothing
+ is read, zero is returned, and the error state is set to Z_STREAM_ERROR.
+
+ In the event that the end of file is reached and only a partial item is
+ available at the end, i.e. the remaining uncompressed data length is not a
+ multiple of size, then the final partial item is nevertheless read into buf
+ and the end-of-file flag is set. The length of the partial item read is not
+ provided, but could be inferred from the result of gztell(). This behavior
+ is the same as the behavior of fread() implementations in common libraries,
+ but it prevents the direct use of gzfread() to read a concurrently written
+ file, resetting and retrying on end-of-file, when size is not 1.
+*/
+
+ZEXTERN int ZEXPORT gzwrite OF((gzFile file,
+ voidpc buf, unsigned len));
+/*
+ Writes the given number of uncompressed bytes into the compressed file.
+ gzwrite returns the number of uncompressed bytes written or 0 in case of
+ error.
+*/
+
+ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size,
+ z_size_t nitems, gzFile file));
+/*
+ gzfwrite() writes nitems items of size size from buf to file, duplicating
+ the interface of stdio's fwrite(), with size_t request and return types. If
+ the library defines size_t, then z_size_t is identical to size_t. If not,
+ then z_size_t is an unsigned integer type that can contain a pointer.
+
+ gzfwrite() returns the number of full items written of size size, or zero
+ if there was an error. If the multiplication of size and nitems overflows,
+ i.e. the product does not fit in a z_size_t, then nothing is written, zero
+ is returned, and the error state is set to Z_STREAM_ERROR.
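
   Editor's note -- an illustrative sketch, not part of the vendored zlib
   header: writing a small .gz file and reading it back with the functions
   described above.  The file name "demo.gz", the "wb9" compression level
   and the buffer size are invented for the example; gzclose() and
   gzerror() are documented further below.

     static const char msg[] = "one line of sample text\n";
     char buf[256];
     int n = -1;
     gzFile gz = gzopen("demo.gz", "wb9");
     if (gz != NULL) {
         gzwrite(gz, msg, (unsigned)(sizeof(msg) - 1));
         gzclose(gz);
     }
     gz = gzopen("demo.gz", "rb");
     if (gz != NULL) {
         n = gzread(gz, buf, (unsigned)sizeof(buf));
         gzclose(gz);
     }

   On success n holds the number of bytes read back (the length of msg); a
   zero return from gzwrite() or a negative n would call for gzerror().
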
+*/ + +ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); +/* + Converts, formats, and writes the arguments to the compressed file under + control of the format string, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or a negative zlib error code in case + of error. The number of uncompressed bytes written is limited to 8191, or + one less than the buffer size given to gzbuffer(). The caller should assure + that this limit is not exceeded. If it is exceeded, then gzprintf() will + return an error (0) with nothing written. In this case, there may also be a + buffer overflow with unpredictable consequences, which is possible only if + zlib was compiled with the insecure functions sprintf() or vsprintf() + because the secure snprintf() or vsnprintf() functions were not available. + This can be determined using zlibCompileFlags(). +*/ + +ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); +/* + Writes the given null-terminated string to the compressed file, excluding + the terminating null character. + + gzputs returns the number of characters written, or -1 in case of error. +*/ + +ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); +/* + Reads bytes from the compressed file until len-1 characters are read, or a + newline character is read and transferred to buf, or an end-of-file + condition is encountered. If any characters are read or if len == 1, the + string is terminated with a null character. If no characters are read due + to an end-of-file or len < 1, then the buffer is left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. +*/ + +ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); +/* + Writes c, converted to an unsigned char, into the compressed file. gzputc + returns the value that was written, or -1 in case of error. +*/ + +ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); +/* + Reads one byte from the compressed file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. +*/ + +ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); +/* + Push one character back onto the stream to be read as the first character + on the next read. At least one character of push-back is allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). +*/ + +ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); +/* + Flushes all pending output into the compressed file. The parameter flush + is as in the deflate() function. The return value is the zlib error number + (see function gzerror below). gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. 
gzread() is able to read such + concatenated gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, + z_off_t offset, int whence)); + + Sets the starting position for the next gzread or gzwrite on the given + compressed file. The offset represents a number of bytes in the + uncompressed data stream. The whence parameter is defined as in lseek(2); + the value SEEK_END is not supported. + + If the file is opened for reading, this function is emulated but can be + extremely slow. If the file is opened for writing, only forward seeks are + supported; gzseek then compresses a sequence of zeroes up to the new + starting position. + + gzseek returns the resulting offset location as measured in bytes from + the beginning of the uncompressed stream, or -1 in case of error, in + particular if the file is opened for writing and the new starting position + would be before the current position. +*/ + +ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); +/* + Rewinds the given file. This function is supported only for reading. + + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); + + Returns the starting position for the next gzread or gzwrite on the given + compressed file. This position represents a number of bytes in the + uncompressed data stream, and is zero when starting, even if appending or + reading a gzip stream from the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) +*/ + +/* +ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); + + Returns the current offset in the file being read or written. This offset + includes the count of bytes that precede the gzip stream, for example when + appending or when using gzdopen() for reading. When reading, the offset + does not include as yet unused buffered input. This information can be used + for a progress indicator. On error, gzoffset() returns -1. +*/ + +ZEXTERN int ZEXPORT gzeof OF((gzFile file)); +/* + Returns true (1) if the end-of-file indicator has been set while reading, + false (0) otherwise. Note that the end-of-file indicator is set only if the + read tried to go past the end of the input, but came up short. Therefore, + just like feof(), gzeof() may return false even if there is no more data to + read, in the event that the last read request was for the exact number of + bytes remaining in the input file. This will happen if the input file size + is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. +*/ + +ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); +/* + Returns true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). + + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. 
(Note: + gzdirect() is not needed when writing. Transparent writing must be + explicitly requested, so the application already knows the answer. When + linking statically, using gzdirect() will include all of the zlib code for + gzip file reading and decompression, which may not be desired.) +*/ + +ZEXTERN int ZEXPORT gzclose OF((gzFile file)); +/* + Flushes all pending output if necessary, closes the compressed file and + deallocates the (de)compression state. Note that once file is closed, you + cannot call gzerror with file, since its structures have been deallocated. + gzclose must not be called more than once on the same file, just as free + must not be called more than once on the same allocation. + + gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a + file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the + last read ended in the middle of a gzip stream, or Z_OK on success. +*/ + +ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); +/* + Same as gzclose(), but gzclose_r() is only for use when reading, and + gzclose_w() is only for use when writing or appending. The advantage to + using these instead of gzclose() is that they avoid linking in zlib + compression or decompression code that is not used when only reading or only + writing respectively. If gzclose() is used, then both compression and + decompression code will be included the application when linking to a static + zlib library. +*/ + +ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); +/* + Returns the error message for the last error which occurred on the given + compressed file. errnum is set to zlib error number. If an error occurred + in the file system and not in the compression library, errnum is set to + Z_ERRNO and the application may consult errno to get the exact error code. + + The application must not modify the returned string. Future calls to + this function may invalidate the previously returned string. If file is + closed, then the string previously returned by gzerror will no longer be + available. + + gzerror() should be used to distinguish errors from end-of-file for those + functions above that do not distinguish those cases in their return values. +*/ + +ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); +/* + Clears the error and end-of-file flags for file. This is analogous to the + clearerr() function in stdio. This is useful for continuing to read a gzip + file that is being written concurrently. +*/ + +#endif /* !Z_SOLO */ + + /* checksum functions */ + +/* + These functions are not related to compression but are exported + anyway because they might be useful in applications using the compression + library. +*/ + +ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); +/* + Update a running Adler-32 checksum with the bytes buf[0..len-1] and + return the updated checksum. If buf is Z_NULL, this function returns the + required initial value for the checksum. + + An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed + much faster. + + Usage example: + + uLong adler = adler32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + adler = adler32(adler, buffer, length); + } + if (adler != original_adler) error(); +*/ + +ZEXTERN uLong ZEXPORT adler32_z OF((uLong adler, const Bytef *buf, + z_size_t len)); +/* + Same as adler32(), but with a size_t length. 
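
   Editor's note -- an illustrative sketch, not part of the vendored zlib
   header: combining two independently computed checksums with
   adler32_combine(), which is documented just below.  part1/part2 and
   len1/len2 are placeholders for two byte buffers and their lengths.

     uLong a1 = adler32(adler32(0L, Z_NULL, 0), part1, len1);
     uLong a2 = adler32(adler32(0L, Z_NULL, 0), part2, len2);
     uLong whole = adler32_combine(a1, a2, (z_off_t)len2);

   whole then equals the Adler-32 of part1 immediately followed by part2,
   without having to touch part1 again.
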
+*/ + +/* +ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, + z_off_t len2)); + + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 + and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for + each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. +*/ + +ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); +/* + Update a running CRC-32 with the bytes buf[0..len-1] and return the + updated CRC-32. If buf is Z_NULL, this function returns the required + initial value for the crc. Pre- and post-conditioning (one's complement) is + performed within this function so it shouldn't be done by the application. + + Usage example: + + uLong crc = crc32(0L, Z_NULL, 0); + + while (read_buffer(buffer, length) != EOF) { + crc = crc32(crc, buffer, length); + } + if (crc != original_crc) error(); +*/ + +ZEXTERN uLong ZEXPORT crc32_z OF((uLong adler, const Bytef *buf, + z_size_t len)); +/* + Same as crc32(), but with a size_t length. +*/ + +/* +ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); + + Combine two CRC-32 check values into one. For two sequences of bytes, + seq1 and seq2 with lengths len1 and len2, CRC-32 check values were + calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 + check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and + len2. +*/ + + + /* various hacks, don't look :) */ + +/* deflateInit and inflateInit are macros to allow checking the zlib version + * and the compiler's view of z_stream: + */ +ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, + int windowBits, int memLevel, + int strategy, const char *version, + int stream_size)); +ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, + const char *version, int stream_size)); +ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, + unsigned char FAR *window, + const char *version, + int stream_size)); +#ifdef Z_PREFIX_SET +# define z_deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +# define z_inflateInit2(strm, windowBits) \ + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +# define z_inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + ZLIB_VERSION, (int)sizeof(z_stream)) +#else +# define deflateInit(strm, level) \ + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) +# define inflateInit(strm) \ + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) +# define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) +# define inflateInit2(strm, windowBits) \ + 
inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) +# define inflateBackInit(strm, windowBits, window) \ + inflateBackInit_((strm), (windowBits), (window), \ + ZLIB_VERSION, (int)sizeof(z_stream)) +#endif + +#ifndef Z_SOLO + +/* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ +struct gzFile_s { + unsigned have; + unsigned char *next; + z_off64_t pos; +}; +ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +# define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#else +# define gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) +#endif + +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +# ifdef Z_PREFIX_SET +# define z_gzopen z_gzopen64 +# define z_gzseek z_gzseek64 +# define z_gztell z_gztell64 +# define z_gzoffset z_gzoffset64 +# define z_adler32_combine z_adler32_combine64 +# define z_crc32_combine z_crc32_combine64 +# else +# define gzopen gzopen64 +# define gzseek gzseek64 +# define gztell gztell64 +# define gzoffset gzoffset64 +# define adler32_combine adler32_combine64 +# define crc32_combine crc32_combine64 +# endif +# ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +# endif +#else + ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); +#endif + +#else /* Z_SOLO */ + + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); + +#endif /* !Z_SOLO */ + +/* undocumented functions */ +ZEXTERN const char * ZEXPORT zError OF((int)); +ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); +ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); +ZEXTERN 
int ZEXPORT inflateUndermine OF((z_streamp, int)); +ZEXTERN int ZEXPORT inflateValidate OF((z_streamp, int)); +ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF ((z_streamp)); +ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); +ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); +#if (defined(_WIN32) || defined(__CYGWIN__)) && !defined(Z_SOLO) +ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, + const char *mode)); +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, + const char *format, + va_list va)); +# endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ZLIB_H */ diff --git a/deps/zlib-1.2.11/src/zlib.pc b/deps/zlib-1.2.11/src/zlib.pc new file mode 100644 index 000000000000..8009d6422a00 --- /dev/null +++ b/deps/zlib-1.2.11/src/zlib.pc @@ -0,0 +1,13 @@ +prefix=/usr/local +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +sharedlibdir=${libdir} +includedir=${prefix}/include + +Name: zlib +Description: zlib compression library +Version: 1.2.11 + +Requires: +Libs: -L${libdir} -L${sharedlibdir} -lz +Cflags: -I${includedir} diff --git a/deps/zlib-1.2.11/src/zutil.c b/deps/zlib-1.2.11/src/zutil.c new file mode 100644 index 000000000000..a76c6b0c7e55 --- /dev/null +++ b/deps/zlib-1.2.11/src/zutil.c @@ -0,0 +1,325 @@ +/* zutil.c -- target dependent utility functions for the compression library + * Copyright (C) 1995-2017 Jean-loup Gailly + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#include "zutil.h" +#ifndef Z_SOLO +# include "gzguts.h" +#endif + +z_const char * const z_errmsg[10] = { + (z_const char *)"need dictionary", /* Z_NEED_DICT 2 */ + (z_const char *)"stream end", /* Z_STREAM_END 1 */ + (z_const char *)"", /* Z_OK 0 */ + (z_const char *)"file error", /* Z_ERRNO (-1) */ + (z_const char *)"stream error", /* Z_STREAM_ERROR (-2) */ + (z_const char *)"data error", /* Z_DATA_ERROR (-3) */ + (z_const char *)"insufficient memory", /* Z_MEM_ERROR (-4) */ + (z_const char *)"buffer error", /* Z_BUF_ERROR (-5) */ + (z_const char *)"incompatible version",/* Z_VERSION_ERROR (-6) */ + (z_const char *)"" +}; + + +const char * ZEXPORT zlibVersion() +{ + return ZLIB_VERSION; +} + +uLong ZEXPORT zlibCompileFlags() +{ + uLong flags; + + flags = 0; + switch ((int)(sizeof(uInt))) { + case 2: break; + case 4: flags += 1; break; + case 8: flags += 2; break; + default: flags += 3; + } + switch ((int)(sizeof(uLong))) { + case 2: break; + case 4: flags += 1 << 2; break; + case 8: flags += 2 << 2; break; + default: flags += 3 << 2; + } + switch ((int)(sizeof(voidpf))) { + case 2: break; + case 4: flags += 1 << 4; break; + case 8: flags += 2 << 4; break; + default: flags += 3 << 4; + } + switch ((int)(sizeof(z_off_t))) { + case 2: break; + case 4: flags += 1 << 6; break; + case 8: flags += 2 << 6; break; + default: flags += 3 << 6; + } +#ifdef ZLIB_DEBUG + flags += 1 << 8; +#endif +#if defined(ASMV) || defined(ASMINF) + flags += 1 << 9; +#endif +#ifdef ZLIB_WINAPI + flags += 1 << 10; +#endif +#ifdef BUILDFIXED + flags += 1 << 12; +#endif +#ifdef DYNAMIC_CRC_TABLE + flags += 1 << 13; +#endif +#ifdef NO_GZCOMPRESS + flags += 1L << 16; +#endif +#ifdef NO_GZIP + flags += 1L << 17; +#endif +#ifdef PKZIP_BUG_WORKAROUND + flags += 1L << 20; +#endif +#ifdef FASTEST + flags += 1L << 21; +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifdef NO_vsnprintf + flags += 1L << 25; +# ifdef HAS_vsprintf_void + flags += 1L << 26; +# endif +# else +# ifdef HAS_vsnprintf_void 
+ flags += 1L << 26; +# endif +# endif +#else + flags += 1L << 24; +# ifdef NO_snprintf + flags += 1L << 25; +# ifdef HAS_sprintf_void + flags += 1L << 26; +# endif +# else +# ifdef HAS_snprintf_void + flags += 1L << 26; +# endif +# endif +#endif + return flags; +} + +#ifdef ZLIB_DEBUG +#include +# ifndef verbose +# define verbose 0 +# endif +int ZLIB_INTERNAL z_verbose = verbose; + +void ZLIB_INTERNAL z_error (m) + char *m; +{ + fprintf(stderr, "%s\n", m); + exit(1); +} +#endif + +/* exported to allow conversion of error code to string for compress() and + * uncompress() + */ +const char * ZEXPORT zError(err) + int err; +{ + return ERR_MSG(err); +} + +#if defined(_WIN32_WCE) + /* The Microsoft C Run-Time Library for Windows CE doesn't have + * errno. We define it as a global variable to simplify porting. + * Its value is always 0 and should not be used. + */ + int errno = 0; +#endif + +#ifndef HAVE_MEMCPY + +void ZLIB_INTERNAL zmemcpy(dest, source, len) + Bytef* dest; + const Bytef* source; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = *source++; /* ??? to be unrolled */ + } while (--len != 0); +} + +int ZLIB_INTERNAL zmemcmp(s1, s2, len) + const Bytef* s1; + const Bytef* s2; + uInt len; +{ + uInt j; + + for (j = 0; j < len; j++) { + if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; + } + return 0; +} + +void ZLIB_INTERNAL zmemzero(dest, len) + Bytef* dest; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = 0; /* ??? to be unrolled */ + } while (--len != 0); +} +#endif + +#ifndef Z_SOLO + +#ifdef SYS16BIT + +#ifdef __TURBOC__ +/* Turbo C in 16-bit mode */ + +# define MY_ZCALLOC + +/* Turbo C malloc() does not allow dynamic allocation of 64K bytes + * and farmalloc(64K) returns a pointer with an offset of 8, so we + * must fix the pointer. Warning: the pointer must be put back to its + * original form in order to free it, use zcfree(). + */ + +#define MAX_PTR 10 +/* 10*64K = 640K */ + +local int next_ptr = 0; + +typedef struct ptr_table_s { + voidpf org_ptr; + voidpf new_ptr; +} ptr_table; + +local ptr_table table[MAX_PTR]; +/* This table is used to remember the original form of pointers + * to large buffers (64K). Such pointers are normalized with a zero offset. + * Since MSDOS is not a preemptive multitasking OS, this table is not + * protected from concurrent access. This hack doesn't work anyway on + * a protected system like OS/2. Use Microsoft C instead. + */ + +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) +{ + voidpf buf; + ulg bsize = (ulg)items*size; + + (void)opaque; + + /* If we allocate less than 65520 bytes, we assume that farmalloc + * will return a usable pointer which doesn't have to be normalized. 
+ */ + if (bsize < 65520L) { + buf = farmalloc(bsize); + if (*(ush*)&buf != 0) return buf; + } else { + buf = farmalloc(bsize + 16L); + } + if (buf == NULL || next_ptr >= MAX_PTR) return NULL; + table[next_ptr].org_ptr = buf; + + /* Normalize the pointer to seg:0 */ + *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; + *(ush*)&buf = 0; + table[next_ptr++].new_ptr = buf; + return buf; +} + +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) +{ + int n; + + (void)opaque; + + if (*(ush*)&ptr != 0) { /* object < 64K */ + farfree(ptr); + return; + } + /* Find the original pointer */ + for (n = 0; n < next_ptr; n++) { + if (ptr != table[n].new_ptr) continue; + + farfree(table[n].org_ptr); + while (++n < next_ptr) { + table[n-1] = table[n]; + } + next_ptr--; + return; + } + Assert(0, "zcfree: ptr not found"); +} + +#endif /* __TURBOC__ */ + + +#ifdef M_I86 +/* Microsoft C in 16-bit mode */ + +# define MY_ZCALLOC + +#if (!defined(_MSC_VER) || (_MSC_VER <= 600)) +# define _halloc halloc +# define _hfree hfree +#endif + +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) +{ + (void)opaque; + return _halloc((long)items, size); +} + +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) +{ + (void)opaque; + _hfree(ptr); +} + +#endif /* M_I86 */ + +#endif /* SYS16BIT */ + + +#ifndef MY_ZCALLOC /* Any system without a special alloc function */ + +#ifndef STDC +extern voidp malloc OF((uInt size)); +extern voidp calloc OF((uInt items, uInt size)); +extern void free OF((voidpf ptr)); +#endif + +voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) + voidpf opaque; + unsigned items; + unsigned size; +{ + (void)opaque; + return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) : + (voidpf)calloc(items, size); +} + +void ZLIB_INTERNAL zcfree (opaque, ptr) + voidpf opaque; + voidpf ptr; +{ + (void)opaque; + free(ptr); +} + +#endif /* MY_ZCALLOC */ + +#endif /* !Z_SOLO */ diff --git a/deps/zlib-1.2.11/src/zutil.h b/deps/zlib-1.2.11/src/zutil.h new file mode 100644 index 000000000000..b079ea6a80f5 --- /dev/null +++ b/deps/zlib-1.2.11/src/zutil.h @@ -0,0 +1,271 @@ +/* zutil.h -- internal interface and configuration of the compression library + * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. 
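
   Editor's note -- an illustrative aside, not part of the vendored zlib
   sources: the zcalloc()/zcfree() fall-backs defined in zutil.c above are
   only used when an application leaves the z_stream fields zalloc, zfree
   and opaque set to Z_NULL.  An application that wants its own allocator
   supplies it through those zlib.h-level fields instead of touching these
   internals, roughly as follows (my_alloc/my_free are invented names;
   <stdlib.h> is assumed to be included):

     static voidpf my_alloc(voidpf opaque, uInt items, uInt size)
     {
         (void)opaque;
         return (voidpf)malloc((size_t)items * (size_t)size);
     }

     static void my_free(voidpf opaque, voidpf address)
     {
         (void)opaque;
         free(address);
     }

     z_stream strm;
     strm.zalloc = my_alloc;
     strm.zfree = my_free;
     strm.opaque = Z_NULL;

   after which deflateInit(&strm, level), inflateInit(&strm) or
   inflateBackInit(&strm, windowBits, window) can be called as usual.
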
+ */
+
+/* @(#) $Id$ */
+
+#ifndef ZUTIL_H
+#define ZUTIL_H
+
+#ifdef HAVE_HIDDEN
+# define ZLIB_INTERNAL __attribute__((visibility ("hidden")))
+#else
+# define ZLIB_INTERNAL
+#endif
+
+#include "zlib.h"
+
+#if defined(STDC) && !defined(Z_SOLO)
+# if !(defined(_WIN32_WCE) && defined(_MSC_VER))
+# include <stddef.h>
+# endif
+# include <string.h>
+# include <stdlib.h>
+#endif
+
+#ifdef Z_SOLO
+ typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */
+#endif
+
+#ifndef local
+# define local static
+#endif
+/* since "static" is used to mean two completely different things in C, we
+ define "local" for the non-static meaning of "static", for readability
+ (compile with -Dlocal if your debugger can't find static symbols) */
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
+/* (size given to avoid silly warnings with Visual C++) */
+
+#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
+
+#define ERR_RETURN(strm,err) \
+ return (strm->msg = ERR_MSG(err), (err))
+/* To be used only when the state is known to be valid */
+
+ /* common constants */
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+#if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32))
+# define OS_CODE 0x00
+# ifndef Z_SOLO
+# if defined(__TURBOC__) || defined(__BORLANDC__)
+# if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__))
+ /* Allow compilation with ANSI keywords only enabled */
+ void _Cdecl farfree( void *block );
+ void *_Cdecl farmalloc( unsigned long nbytes );
+# else
+# include <alloc.h>
+# endif
+# else /* MSC or DJGPP */
+# include <malloc.h>
+# endif
+# endif
+#endif
+
+#ifdef AMIGA
+# define OS_CODE 1
+#endif
+
+#if defined(VAXC) || defined(VMS)
+# define OS_CODE 2
+# define F_OPEN(name, mode) \
+ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
+#endif
+
+#ifdef __370__
+# if __TARGET_LIB__ < 0x20000000
+# define OS_CODE 4
+# elif __TARGET_LIB__ < 0x40000000
+# define OS_CODE 11
+# else
+# define OS_CODE 8
+# endif
+#endif
+
+#if defined(ATARI) || defined(atarist)
+# define OS_CODE 5
+#endif
+
+#ifdef OS2
+# define OS_CODE 6
+# if defined(M_I86) && !defined(Z_SOLO)
+# include <malloc.h>
+# endif
+#endif
+
+#if defined(MACOS) || defined(TARGET_OS_MAC)
+# define OS_CODE 7
+# ifndef Z_SOLO
+# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os
+# include <unix.h> /* for fdopen */
+# else
+# ifndef fdopen
+# define fdopen(fd,mode) NULL /* No fdopen() */
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __acorn
+# define OS_CODE 13
+#endif
+
+#if defined(WIN32) && !defined(__CYGWIN__)
+# define OS_CODE 10
+#endif
+
+#ifdef _BEOS_
+# define OS_CODE 16
+#endif
+
+#ifdef __TOS_OS400__
+# define OS_CODE 18
+#endif
+
+#ifdef __APPLE__
+# define OS_CODE 19
+#endif
+
+#if defined(_BEOS_) || defined(RISCOS)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+#endif
+
+#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX
+# if defined(_WIN32_WCE)
+# define 
fdopen(fd,mode) NULL /* No fdopen() */ +# ifndef _PTRDIFF_T_DEFINED + typedef int ptrdiff_t; +# define _PTRDIFF_T_DEFINED +# endif +# else +# define fdopen(fd,type) _fdopen(fd,type) +# endif +#endif + +#if defined(__BORLANDC__) && !defined(MSDOS) + #pragma warn -8004 + #pragma warn -8008 + #pragma warn -8066 +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_WIN32) && \ + (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +#endif + + /* common defaults */ + +#ifndef OS_CODE +# define OS_CODE 3 /* assume Unix */ +#endif + +#ifndef F_OPEN +# define F_OPEN(name, mode) fopen((name), (mode)) +#endif + + /* functions */ + +#if defined(pyr) || defined(Z_SOLO) +# define NO_MEMCPY +#endif +#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) + /* Use our own functions for small and medium model with MSC <= 5.0. + * You may have to use the same strategy for Borland C (untested). + * The __SC__ check is for Symantec. + */ +# define NO_MEMCPY +#endif +#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) +# define HAVE_MEMCPY +#endif +#ifdef HAVE_MEMCPY +# ifdef SMALL_MEDIUM /* MSDOS small or medium model */ +# define zmemcpy _fmemcpy +# define zmemcmp _fmemcmp +# define zmemzero(dest, len) _fmemset(dest, 0, len) +# else +# define zmemcpy memcpy +# define zmemcmp memcmp +# define zmemzero(dest, len) memset(dest, 0, len) +# endif +#else + void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); + int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); + void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len)); +#endif + +/* Diagnostic functions */ +#ifdef ZLIB_DEBUG +# include + extern int ZLIB_INTERNAL z_verbose; + extern void ZLIB_INTERNAL z_error OF((char *m)); +# define Assert(cond,msg) {if(!(cond)) z_error(msg);} +# define Trace(x) {if (z_verbose>=0) fprintf x ;} +# define Tracev(x) {if (z_verbose>0) fprintf x ;} +# define Tracevv(x) {if (z_verbose>1) fprintf x ;} +# define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;} +# define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + +#ifndef Z_SOLO + voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items, + unsigned size)); + void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr)); +#endif + +#define ZALLOC(strm, items, size) \ + (*((strm)->zalloc))((strm)->opaque, (items), (size)) +#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) +#define TRY_FREE(s, p) {if (p) ZFREE(s, p);} + +/* Reverse the bytes in a 32-bit value */ +#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ + (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) + +#endif /* ZUTIL_H */ diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg new file mode 100644 index 000000000000..01ba21ca8820 --- /dev/null +++ b/packaging/cfg/taos.cfg @@ -0,0 +1,181 @@ +######################################################## +# # +# TDengine Configuration # +# Any questions, please email support@taosdata.com # +# # +######################################################## + + +# Internal IP address of the server, which can be acquired by using ifconfig command. 
+# internalIp 127.0.0.1 + +# client local IP +# localIp 127.0.0.1 + +# data file's directory +# dataDir /var/lib/taos + +# log file's directory +# logDir /var/log/taos + +# http service port, default tcp[6020] +# httpPort 6020 + +# port for MNode connect to Client, default udp[6030-6034] tcp[6030] +# mgmtShellPort 6030 + +# port for DNode connect to Client, default udp[6035-6039] tcp[6035] +# vnodeShellPort 6035 + +# number of threads per CPU core +# numOfThreadsPerCore 1 + +# number of vnodes per core in DNode +# numOfVnodesPerCore 8 + +# number of total vnodes in DNode +# numOfTotalVnodes 0 + +# max number of sessions per vnode +# tables 1024 + +# cache block size +# cache 16384 + +# row in file block +# rows 4096 + +# average cache blocks per meter +# ablocks 4 + +# max number of cache blocks per Meter +# tblocks 512 + +# interval of system monitor +# monitorInterval 60 + +# RPC re-try timer, millisecond +# rpcTimer 300 + +# RPC maximum time for ack, seconds +# rpcMaxTime 600 + +# commit interval,unit is second +# ctime 3600 + +# interval of DNode report status to MNode, unit is Second +# statusInterval 1 + +# interval of Shell send HB to MNode, unit is Second +# shellActivityTimer 3 + +# time to keep MeterMeta in Cache, seconds +# meterMetaKeepTimer 7200 + +# time to keep MetricMeta in Cache, seconds +# metricMetaKeepTimer 600 + +# max number of users +# maxUsers 1000 + +# max number of databases +# maxDbs 1000 + +# max number of tables +# maxTables 650000 + +# system locale +# locale en_US.UTF-8 + +# default system charset +# charset UTF-8 + +# enable/disable commit log +# clog 1 + +# enable/disable async log +# asyncLog 1 + +# enable/disable compression +# comp 1 + +# number of days per DB file +# days 10 + +# number of days to keep DB file +# keep 3650 + +# client default database(database should be created) +# defaultDB + +# client default username +# defaultUser root + +# client default password +# defaultPass taosdata + +# max number of connections from client for mgmt node +# maxShellConns 2000 + +# max numerber of meter Connections +# maxMeterConnections 10000 + +# max connection to management node +# maxMgmtConnections 2000 + +# max connection to Vnode +# maxVnodeConnections 10000 + +# start http service in the cluster +# enableHttp 1 + +# start system monitor module in the cluster +# enableMonitor 1 + +# httpMaxThreads 2 + +# The following parameter is used to limit the maximum number of lines in log files. +# max number of rows per log filters +# numOfLogLines 10000000 + +# The following parameters are used for debug purpose only. +# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR +# 131: output warning and error,135: output info, warning and error to log. 
+# 199: output debug, info, warning and error to both screen and file + +# debug flag for meta management messages +# mDebugFlag 135 + +# debug flag for dnode messages +# dDebugFlag 131 + +# debug flag for TDengine SDB +# sdbDebugFlag 135 + +# debug flag for RPC +# taosDebugFlag 131 + +# debug flag for basic utils +# debugFlag 131 + +# debug flag for TDengine client +# cDebugFlag 131 + +# debug flag for http server +# httpDebugFlag 131 + +# debug flag for system monitor +# monitorDebugFlag 131 + +# debug flag for TAOS TIMER +# tmrDebugFlag 131 + +########################### data directory configuration ############################ +# option mount_path tier_level +# dataDir /mnt/disk1/taos 0 +# dataDir /mnt/disk2/taos 0 +# dataDir /mnt/disk3/taos 0 +# dataDir /mnt/disk4/taos 0 +# dataDir /mnt/disk5/taos 0 +# dataDir /mnt/disk6/taos 1 +# dataDir /mnt/disk7/taos 1 diff --git a/packaging/deb/DEBIAN/control b/packaging/deb/DEBIAN/control new file mode 100644 index 000000000000..c01640d7e9ad --- /dev/null +++ b/packaging/deb/DEBIAN/control @@ -0,0 +1,14 @@ +Package: tdengine +Version: 1.0.0 +Section: utils +Priority: optional +#Essential: no +#Depends: no +#Suggests: no +Architecture: amd64 +Installed-Size: 66666 +Maintainer: support@taosdata.com +Provides: taosdata +Homepage: http://taosdata.com +Description: Big Data Platform Designed and Optimized for IoT. + diff --git a/packaging/deb/DEBIAN/postinst b/packaging/deb/DEBIAN/postinst new file mode 100644 index 000000000000..08f3e513c7cf --- /dev/null +++ b/packaging/deb/DEBIAN/postinst @@ -0,0 +1,7 @@ +#!/bin/bash +#set -x +#path=`pwd` +insmetaPath="/usr/local/taos/script" +sudo chmod -R 744 ${insmetaPath} +cd ${insmetaPath} +sudo ./post.sh diff --git a/packaging/deb/DEBIAN/postrm b/packaging/deb/DEBIAN/postrm new file mode 100644 index 000000000000..05a7907cf5a4 --- /dev/null +++ b/packaging/deb/DEBIAN/postrm @@ -0,0 +1,2 @@ +#!/bin/bash + diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst new file mode 100644 index 000000000000..88b76568df48 --- /dev/null +++ b/packaging/deb/DEBIAN/preinst @@ -0,0 +1,28 @@ +#!/bin/bash + +function is_using_systemd() { + if pidof systemd &> /dev/null; then + return 0 + else + return 1 + fi +} + +# Stop the service if running +if pidof taosd &> /dev/null; then + if is_using_systemd; then + sudo systemctl stop taosd || : + else + sudo service taosd stop || : + fi + echo "Stop taosd service success!" 
+ sleep 1 +fi + +# if taos.cfg already softlink, remove it +cfg_install_dir="/etc/taos" +install_main_dir="/usr/local/taos" +if [ -f ${cfg_install_dir}/taos.cfg ]; then + sudo rm -f ${install_main_dir}/cfg/taos.cfg || : +fi + diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm new file mode 100644 index 000000000000..943c02948ba2 --- /dev/null +++ b/packaging/deb/DEBIAN/prerm @@ -0,0 +1,9 @@ +#!/bin/bash + +#path=`pwd` +insmetaPath="/usr/local/taos/script" +cd ${insmetaPath} +#sudo chmod -R 744 ${insmetaPath} +sudo ./preun.sh + + diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh new file mode 100755 index 000000000000..c6076fcedc8a --- /dev/null +++ b/packaging/deb/makedeb.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# +# Generate deb package for ubuntu +#set -x + +#curr_dir=$(pwd) +compile_dir=$1 +output_dir=$2 +tdengine_ver=$3 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -m ${script_dir}/../..)" +pkg_dir="${top_dir}/debworkroom" + +#echo "curr_dir: ${curr_dir}" +#echo "top_dir: ${top_dir}" +#echo "script_dir: ${script_dir}" +echo "compile_dir: ${compile_dir}" +echo "pkg_dir: ${pkg_dir}" + +if [ -d ${pkg_dir} ]; then + rm -rf ${pkg_dir} +fi +mkdir -p ${pkg_dir} +cd ${pkg_dir} + +# create install dir +install_home_path="/usr/local/taos" +mkdir -p ${pkg_dir}${install_home_path}/bin +mkdir -p ${pkg_dir}${install_home_path}/cfg +mkdir -p ${pkg_dir}${install_home_path}/connector +mkdir -p ${pkg_dir}${install_home_path}/driver +mkdir -p ${pkg_dir}${install_home_path}/examples +mkdir -p ${pkg_dir}${install_home_path}/include +mkdir -p ${pkg_dir}${install_home_path}/init.d +mkdir -p ${pkg_dir}${install_home_path}/script + +cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg +cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d +cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script +cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script +cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/lib/libtaos.so ${pkg_dir}${install_home_path}/driver +cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include +cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples +cp -r ${top_dir}/src/connector/grafana ${pkg_dir}${install_home_path}/connector +cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector +cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector +cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector + +cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/ +chmod 755 ${pkg_dir}/DEBIAN/* + +# modify version of control +debver="Version: "$tdengine_ver +sed -i "2c$debver" ${pkg_dir}/DEBIAN/control + +#get taos version, then set deb name +debname="tdengine-"${tdengine_ver}".deb" + +# make deb package +dpkg -b ${pkg_dir} $debname +echo "make deb package success!" 
+ +cp ${pkg_dir}/*.deb ${output_dir} + +# clean tmep dir +rm -rf ${pkg_dir} + diff --git a/packaging/deb/taosd b/packaging/deb/taosd new file mode 100755 index 000000000000..192bc9ee9767 --- /dev/null +++ b/packaging/deb/taosd @@ -0,0 +1,89 @@ +#!/bin/bash +# +# Modified from original source: Elastic Search +# https://github.com/elasticsearch/elasticsearch +# Thank you to the Elastic Search authors +# +# chkconfig: 2345 99 01 +# +### BEGIN INIT INFO +# Provides: TDEngine +# Required-Start: $local_fs $network $syslog +# Required-Stop: $local_fs $network $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Starts TDEngine taosd +# Description: Starts TDEngine taosd, a time-series database engine +### END INIT INFO + +set -e + +PATH="/bin:/usr/bin:/sbin:/usr/sbin" +NAME="TDEngine" +USER="root" +GROUP="root" +DAEMON="/usr/local/bin/taos/taosd" +DAEMON_OPTS="" +PID_FILE="/var/run/$NAME.pid" +APPARGS="" + +# Maximum number of open files +MAX_OPEN_FILES=65535 + +. /lib/lsb/init-functions + +case "$1" in + start) + + log_action_begin_msg "Starting TDEngine..." + if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then + + touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE" + + if [ -n "$MAX_OPEN_FILES" ]; then + ulimit -n $MAX_OPEN_FILES + fi + + start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS + + log_end_msg $? + fi + ;; + + stop) + log_action_begin_msg "Stopping TDEngine..." + set +e + if [ -f "$PID_FILE" ]; then + start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null + if [ $? -eq 1 ]; then + log_action_cont_msg "TSD is not running but pid file exists, cleaning up" + elif [ $? -eq 3 ]; then + PID="`cat $PID_FILE`" + log_failure_msg "Failed to stop TDEngine (pid $PID)" + exit 1 + fi + rm -f "$PID_FILE" + else + log_action_cont_msg "TDEngine was not running" + fi + log_action_end_msg 0 + set -e + ;; + + restart|force-reload) + if [ -f "$PID_FILE" ]; then + $0 stop + sleep 1 + fi + $0 start + ;; + status) + status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME" + ;; + *) + # echo "Usage: /etc/init.d/opentsdb {start|stop|restart|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/packaging/docker/Dockerfile.tbase b/packaging/docker/Dockerfile.tbase new file mode 100644 index 000000000000..1c93b91ece14 --- /dev/null +++ b/packaging/docker/Dockerfile.tbase @@ -0,0 +1,27 @@ +# Parent image +FROM ubuntu + +# Set work directory +WORKDIR /app + +# Copy executable file to work directory +COPY ./bin/tdengine . + +# Copy configuration file to /etc/taos +COPY ./cfg/tdengine.cfg /etc/tdengine/ + +# Volume to communicate with host machine +# config file history file +VOLUME ["/etc/tdengine/", "/root"] + +# Command to run +ENTRYPOINT ["/app/tdengine"] + +# To build the image +# docker build -t taos_img -f Dockerfile.taos . 
+ +# To run the image +# docker run -it --rm \ +# --mount type=bind,source=/etc/taos/,target=/etc/taos/ \ +# --mount type=bind,source="$HOME",target=/root \ +# taos_img -p diff --git a/packaging/docker/Dockerfile.tbased b/packaging/docker/Dockerfile.tbased new file mode 100644 index 000000000000..6b8d9c421607 --- /dev/null +++ b/packaging/docker/Dockerfile.tbased @@ -0,0 +1,37 @@ +# Parent image +FROM ubuntu + +# Install required libraries +RUN apt-get update && apt-get install -y libssl-dev + +# Set work directory +WORKDIR /app + +# COPY executable file to work directory +COPY ./bin/tdengined . + +# Copy configuration file to /etc/taos +COPY ./cfg/tdengine.cfg /etc/tdengine/ + +# Volume to communicate with host machine +# data file log file config file +VOLUME ["/var/lib/tdengine/", "/var/log/tdengine/", "/etc/tdengine/"] + +# Port to expose to outside of the world. +EXPOSE 6100 6120 6140 6160 6180 6200 6220 6240 6260 6280 6290 6300 8080 + +# Command to run +ENTRYPOINT ["/app/tdengined"] + +# To build the image +# docker build -t mnode_img -f Dockerfile.mnode . + +# To run mnode +# docker run --rm -it --name mnode \ +# -p 6100:6100 -p 6120:6120 -p 6220:6220 -p 6260:6260 -p 6280:6280 \ +# --mount source=taos_data,target=/var/lib/taos/ \ +# --mount source=taos_log,target=/var/log/taos/ \ +# --mount type=bind,source=/home/hzcheng/Documents/TAOS/taosdata/cfg/,target=/etc/taos/ \ +# --network isolated_nw --ip 172.25.0.10 \ +# mnode_img +# diff --git a/packaging/docker/README.md b/packaging/docker/README.md new file mode 100644 index 000000000000..94f95fa1a3f6 --- /dev/null +++ b/packaging/docker/README.md @@ -0,0 +1,25 @@ +# TAOS DATABASE Docker version + +# Realse docker version install package +To release a docker version install package, change directory to +_taosdata/tools/docker_ and run: +```shell +bash release_docker.sh +``` +Then it will generate a tar file in _release_ file. + +# Install TAOS DATA +To install taosdata, uncompress the _tar_ file in release directory and +run _install.sh_ +```shell +./install.sh # Install mnode and dnode +./install.sh all # Install mnode and dnode +./install.sh mnode # Install mnode +./install.sh dnode # Install dnode +``` + +# Check the services +To check if taosdata run correctly, use _docker_ commands. +```shell +docker container ls +``` diff --git a/packaging/docker/docker-compose.yml b/packaging/docker/docker-compose.yml new file mode 100755 index 000000000000..aa70d5542766 --- /dev/null +++ b/packaging/docker/docker-compose.yml @@ -0,0 +1,78 @@ +version: '3.2' + +services: + mnode: + # Build properties + build: + context: . + dockerfile: Dockerfile.mnode + image: mnode_img + # Deploy properties + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + ports: + - "6100:6100" + - "6120:6120" + - "6220:6220" + - "6260:6260" + - "6280:6280" + volumes: + - type: volume + source: taos_data + target: /var/lib/taos/ + - type: volume + source: taos_log + target: /var/log/taos/ + # And also configuration files. + network_mode: bridge + + dnode: + # Build properties + build: + context: . 
+ dockerfile: Dockerfile.dnode + image: dnode_img + # Deploy properties + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + ports: + - "6140:6140" + - "6160:6160" + - "6180:6180" + - "6200:6200" + - "6240:6240" + volumes: + - type: volume + source: taos_data + target: /var/lib/taos/ + - type: volume + source: taos_log + target: /var/log/taos/ + # And also configuration files. + network_mode: bridge + + taos: + # Build properties + build: + context: . + dockerfile: Dockerfile.taos + image: taos_img + # Deploy properties + +volumes: + taos_data: + taos_log: + +# To build the images: docker-compose build diff --git a/packaging/docker/install.sh b/packaging/docker/install.sh new file mode 100755 index 000000000000..ef08289b4f65 --- /dev/null +++ b/packaging/docker/install.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# ******************************************************** +# Script to install docker-version TAOSDATA on computer +# ******************************************************** + +set -e + +# Global variables +tarFile="tdengine.tar.gz" + +headerDir="/usr/local/include/tdengine" +cfgDir="/etc/tdengine" +binDir="/usr/local/bin/tdengine" +libDir="/usr/local/lib/tdengine" +linkDir="/usr/bin" + +javaAppDir="connector" + + +# TODO: Function to install different parts. +make_directory() { + sudo mkdir -p ${cfgDir} ${headerDir} ${binDir} ${libDir} ${binDir}/connector + # Copy global configure file + sudo cp -n cfg/tdengine.cfg ${cfgDir} +} + +installTDengine() { + # TODO: check if program is installed + make_directory + # Build tdengined image + sudo docker container rm -f tdengined_img || true + sudo docker image rm tdengined_img || true + sudo docker build -t tdengined_img -f Dockerfile.tdengined . + # Deploy the service + sudo docker run -d --name tdengined --network="host" \ + --mount source=taos_data,target=/var/lib/tdengine/ \ + --mount source=taos_log,target=/var/log/tdengine/ \ + --mount type=bind,source=/etc/tdengine/,target=/etc/tdengine/ \ + --restart=always \ + tdengined_img +} + + +installOthers() { + # Update header file + sudo rm -f ${headerDir}/*.h && sudo cp inc/*.h ${headerDir} + + # Update lib file + sudo rm -f /usr/lib/libtaos.so /usr/lib/libtaos.a + sudo rm -f ${libDir}/* && sudo cp lib/* ${libDir} + sudo ln -s ${libDir}/libtaos.so /usr/lib/libtaos.so + sudo ln -s ${libDir}/libtaos.a /usr/lib/libtaos.a + + # Update JDBC + sudo rm -rf ${binDir}/connector/* + sudo cp JDBCDriver*-dist.* ${binDir}/connector 2> /dev/null || : + + # TODO: Install taos + sudo rm -f ${linkDir}/taos ${binDir}/taos.sh + sudo docker image rm taos_img || true + sudo docker build --no-cache -t taos_img -f Dockerfile.tdengine . 
+ sudo echo '#!/bin/bash' > taos.sh + sudo echo >> taos.sh + sudo echo 'docker run -it --rm --network="host" \' >> taos.sh + sudo echo '--mount type=bind,source=/etc/tdengine/,target=/etc/tdengine/ \' >> taos.sh + sudo echo '--mount type=bind,source="$HOME",target=/root \' >> taos.sh + sudo echo 'taos_img $@' >> taos.sh + sudo mv taos.sh ${binDir} + sudo chmod a+x ${binDir}/taos.sh + sudo ln -s ${binDir}/taos.sh ${linkDir}/taos + + # Install remove.sh + sudo rm -f ${linkDir}/rmtaos ${binDir}/remove.sh + sudo cp bin/remove.sh ${binDir} + sudo chmod a+x ${binDir}/remove.sh + sudo ln -s ${binDir}/remove.sh ${linkDir}/rmtaos +} + +printInstallGuide() { + echo + echo "Type 'bash install.sh' to install management and data service" + echo "Type 'bash install.sh dnode' to install data service only" + echo "Type 'bash install.sh mgmt' to install management service only" +} + +# ----------------------- Main program ----------------------- +tar -zxf ${tarFile} + +installTDengine +installOthers + +rm -rf $(tar -tf ${tarFile}) diff --git a/packaging/docker/release_docker.sh b/packaging/docker/release_docker.sh new file mode 100755 index 000000000000..3748b78bdeaa --- /dev/null +++ b/packaging/docker/release_docker.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +# ************************************************************** +# Script to release TAOS install package. +# ************************************************************** + +set -e + +currDir=$(pwd) +codeDir=$(readlink -m ${currDir}/../..) +rootDir=$(readlink -m ${codeDir}/..) +buildDir=$(readlink -m ${rootDir}/build) +releaseDir=$(readlink -m ${rootDir}/release) + + +# # --------------------Get version information +versionInfo="${codeDir}/util/src/version.c" +version=$(cat ${versionInfo} | grep version | cut -d '"' -f2) + +if [ "$1" != "test" ]; then + while true; do + read -p "Do you want to release a new version? [y/N]: " isVersionChange + + if [[ ( "${isVersionChange}" == "y") || ( "${isVersionChange}" == "Y") ]]; then + # TODO: Add version format check here. + read -p "Please enter the new version: " version + break + elif [[ ( "${isVersionChange}" == "n") || ( "${isVersionChange}" == "N") ]]; then + echo "Use old version ${version}" + break + else + continue + fi + done +fi + +buildTime=$(date +"%F %R") +echo "char version[64] = \"${version}\";" > ${versionInfo} +echo "char buildinfo[512] = \"Built by ${USER} at ${buildTime}\";" >> ${versionInfo} + +# --------------------------Make executable file. +cd ${codeDir} +make clean +make +cd ${currDir} + +# --------------------------Group files +# create compressed install file. +installDir="tdengine-docker-${version}-$(echo ${buildTime}| tr ': ' -)-${USER}" + +# directories and files. +binDir="bin" +libDir="lib" +headerDir="inc" +cfgDir="cfg" + +binFiles="${buildDir}/bin/tdengine ${buildDir}/bin/tdengined ${currDir}/remove.sh" +libFiles="${buildDir}/lib/libtaos.so ${buildDir}/lib/libtaos.a" +headerFiles="${codeDir}/inc/taos.h" +cfgFiles="${codeDir}/cfg/*" + +dockerFiles="${currDir}/Dockerfile.tdengined ${currDir}/Dockerfile.tdengine" +installFiles="${currDir}/install.sh" + +# make directories. 
+mkdir -p ${installDir} +mkdir -p ${installDir}/${binDir} && cp ${binFiles} ${installDir}/${binDir} +mkdir -p ${installDir}/${libDir} && cp ${libFiles} ${installDir}/${libDir} +mkdir -p ${installDir}/${headerDir} && cp ${headerFiles} ${installDir}/${headerDir} +mkdir -p ${installDir}/${cfgDir} && cp ${cfgFiles} ${installDir}/${cfgDir} +cp ${dockerFiles} ${installDir} + +cp ${rootDir}/build/lib/JDBCDriver*-dist.* ${installDir} 2> /dev/null || : + +cd ${installDir} +tar -zcf tdengine.tar.gz * --remove-files +cd ${currDir} + +cp ${installFiles} ${installDir} + +# Copy example code +cp -r ${codeDir}/examples ${installDir} + +tar -zcf "${installDir}.tar.gz" ${installDir} --remove-files + +mkdir -p ${releaseDir} +mv "${installDir}.tar.gz" ${releaseDir} diff --git a/packaging/docker/remove.sh b/packaging/docker/remove.sh new file mode 100755 index 000000000000..3b99bbf3aa47 --- /dev/null +++ b/packaging/docker/remove.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# ******************************************************** +# Script to uninstall docker-version TAOSDATA on computer +# ******************************************************** + +headerDir="/usr/local/include/tdengine" +cfgDir="/etc/tdengine" +binDir="/usr/local/bin/tdengine" +libDir="/usr/local/lib/tdengine" +linkDir="/usr/bin" +# 1. Stop continer and remove image +# TODO : Check information +sudo docker container stop tdengined || true +sudo docker container rm tdengined || true +sudo docker image rm tdengined_img || true + +sudo docker image rm taos_img || true + +# 2. Remove others +## remove binary files +sudo rm -rf {linkDir}/taos {linkDir}/rmtaos ${binDir} + +## remove header files +sudo rm -rf ${headerDir} + +## remove lib file +sudo rm -rf /usr/lib/libtaos* ${libDir} + +## remove configuration file +sudo rm -rf ${cfgDir} + +# 3. Remove data +while true; do + read -p "Do you want to delete data file? 
[y/N]: " isDeleteData + + if [[ ( "${isDeleteData}" == "y") || ( "${isDeleteData}" == "Y") ]]; then + sudo docker volume rm -f taos_data taos_log + break + elif [[ ( "${isDeleteData}" == "n") || ( "${isDeleteData}" == "N") ]]; then + break + else + continue + fi +done diff --git a/packaging/docker/run_dnode.sh b/packaging/docker/run_dnode.sh new file mode 100755 index 000000000000..1c48b8e5b230 --- /dev/null +++ b/packaging/docker/run_dnode.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# run dnode +docker run --rm -it --name dnode \ + -p 6140:6140 -p 6160:6160 -p 6180:6180 -p 6200:6200 -p 6240:6240\ + --mount source=taos_data,target=/var/lib/taos/ \ + --mount source=taos_log,target=/var/log/taos/ \ + --mount type=bind,source=/home/hzcheng/Documents/TAOS/taosdata/cfg/,target=/etc/taos/ \ + --network isolated_nw --ip 172.25.0.11 \ + dnode_img diff --git a/packaging/docker/run_mnode.sh b/packaging/docker/run_mnode.sh new file mode 100755 index 000000000000..0f002e7c3c5d --- /dev/null +++ b/packaging/docker/run_mnode.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# run mnode +docker run --rm -it --name mnode \ + -p 6100:6100 -p 6120:6120 -p 6220:6220 -p 6260:6260 -p 6280:6280 \ + --mount source=taos_data,target=/var/lib/taos/ \ + --mount source=taos_log,target=/var/log/taos/ \ + --mount type=bind,source=/home/hzcheng/Documents/TAOS/taosdata/cfg/,target=/etc/taos/ \ + --network isolated_nw --ip 172.25.0.10 \ + mnode_img diff --git a/packaging/docker/run_taos.sh b/packaging/docker/run_taos.sh new file mode 100755 index 000000000000..346ff293be2f --- /dev/null +++ b/packaging/docker/run_taos.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# run taos +docker run --rm -it --name taos \ + --mount type=bind,source=/home/hzcheng/.taos_history,target=/root/.taos_history \ + --mount type=bind,source=/home/hzcheng/Documents/TAOS/taosdata/cfg/,target=/etc/taos/ \ + --network isolated_nw --ip 172.25.0.12 \ + taos_img -p diff --git a/packaging/release.sh b/packaging/release.sh new file mode 100755 index 000000000000..c9666e86ea16 --- /dev/null +++ b/packaging/release.sh @@ -0,0 +1,160 @@ +#!/bin/bash +# +# Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os + +set -e +#set -x + +curr_dir=$(pwd) +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -m ${script_dir}/..)" +versioninfo="${top_dir}/src/util/src/version.c" + +function is_valid_version() { + [ -z $1 ] && return 1 || : + + rx='^([0-9]+\.){3}(\*|[0-9]+)$' + if [[ $1 =~ $rx ]]; then + return 0 + fi + + return 1 +} + +function vercomp () { + if [[ $1 == $2 ]]; then + echo 0 + exit 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + echo 1 + exit 0 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + echo 2 + exit 0 + fi + done + echo 0 +} + +# 1. Read version information +version=$(cat ${versioninfo} | grep " version" | cut -d '"' -f2) +compatible_version=$(cat ${versioninfo} | grep " compatible_version" | cut -d '"' -f2) + +while true; do + read -p "Do you want to release a new version? [y/N]: " is_version_change + + if [[ ( "${is_version_change}" == "y") || ( "${is_version_change}" == "Y") ]]; then + read -p "Please enter the new version: " tversion + while true; do + if (! 
is_valid_version $tversion) || [ "$(vercomp $tversion $version)" = '2' ]; then + read -p "Please enter a correct version: " tversion + continue + fi + version=${tversion} + break + done + + echo + + read -p "Enter the oldest compatible version: " tversion + while true; do + + if [ -z $tversion ]; then + break + fi + + if (! is_valid_version $tversion) || [ "$(vercomp $version $tversion)" = '2' ]; then + read -p "enter correct compatible version: " tversion + else + compatible_version=$tversion + break + fi + done + + break + elif [[ ( "${is_version_change}" == "n") || ( "${is_version_change}" == "N") ]]; then + echo "Use old version: ${version} compatible version: ${compatible_version}." + break + else + continue + fi +done + +# output the version info to the buildinfo file. +build_time=$(date +"%F %R") +echo "char version[64] = \"${version}\";" > ${versioninfo} +echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo} +echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} +echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo} + +# 2. cmake executable file +#default use debug mode +compile_mode="debug" +if [[ $1 == "Release" ]] || [[ $1 == "release" ]]; then + compile_mode="Release" +fi + +compile_dir="${top_dir}/${compile_mode}" +if [ -d ${compile_dir} ]; then + rm -rf ${compile_dir} +fi + +mkdir -p ${compile_dir} +cd ${compile_dir} +cmake -DCMAKE_BUILD_TYPE=${compile_mode} ${top_dir} +make + +cd ${curr_dir} + +# 3. judge the operating system type, then Call the corresponding script for packaging +osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) +#osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) +#echo "osinfo: ${osinfo}" + +if echo $osinfo | grep -qwi "ubuntu" ; then + echo "this is ubuntu system" + output_dir="${top_dir}/debs" + if [ -d ${output_dir} ]; then + rm -rf ${output_dir} + fi + mkdir -p ${output_dir} + cd ${script_dir}/deb + ./makedeb.sh ${compile_dir} ${output_dir} ${version} + +elif echo $osinfo | grep -qwi "centos" ; then + echo "this is centos system" + output_dir="${top_dir}/rpms" + if [ -d ${output_dir} ]; then + rm -rf ${output_dir} + fi + mkdir -p ${output_dir} + cd ${script_dir}/rpm + ./makerpm.sh ${compile_dir} ${output_dir} ${version} + +else + echo "this is other linux system" +fi + +cd ${script_dir}/tools +./makepkg.sh ${compile_dir} ${version} "${build_time}" + +# 4. 
Clean up temporary compile directories +#rm -rf ${compile_dir} + diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh new file mode 100755 index 000000000000..395051108008 --- /dev/null +++ b/packaging/rpm/makerpm.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# +# Generate rpm package for centos + +#curr_dir=$(pwd) +compile_dir=$1 +output_dir=$2 +tdengine_ver=$3 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -m ${script_dir}/../..)" +pkg_dir="${top_dir}/rpmworkroom" +spec_file="${script_dir}/tdengine.spec" + +#echo "curr_dir: ${curr_dir}" +#echo "top_dir: ${top_dir}" +#echo "script_dir: ${script_dir}" +echo "compile_dir: ${compile_dir}" +echo "pkg_dir: ${pkg_dir}" +echo "spec_file: ${spec_file}" + +if [ -d ${pkg_dir} ]; then + rm -rf ${pkg_dir} +fi +mkdir -p ${pkg_dir} +cd ${pkg_dir} + +mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS + +rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file} + +# copy rpm package to output_dir, then clean temp dir +#echo "rmpbuild end, cur_dir: $(pwd) " +cp -rf RPMS/* ${output_dir} +cd .. +rm -rf ${pkg_dir} diff --git a/packaging/rpm/taosd b/packaging/rpm/taosd new file mode 100755 index 000000000000..6283c7938302 --- /dev/null +++ b/packaging/rpm/taosd @@ -0,0 +1,145 @@ +#!/bin/bash +# +# taosd This shell script takes care of starting and stopping TDEngine. +# +# chkconfig: 2345 99 01 +# description: TDEngine is a districuted, scalable, high-performance Time Series Database +# (TSDB). More than just a pure database, TDEngine also provides the ability +# to do stream computing, aggregation etc. +# +# +### BEGIN INIT INFO +# Provides: taosd +# Required-Start: $network $local_fs $remote_fs +# Required-Stop: $network $local_fs $remote_fs +# Short-Description: start and stop taosd +# Description: TDEngine is a districuted, scalable, high-performance Time Series Database +# (TSDB). More than just a pure database, TDEngine also provides the ability +# to do stream computing, aggregation etc. +### END INIT INFO + +# Source init functions +. /etc/init.d/functions + +# Maximum number of open files +MAX_OPEN_FILES=65535 + +# Default program options +NAME=taosd +PROG=/usr/local/bin/taos/taosd +USER=root +GROUP=root + +# Default directories +LOCK_DIR=/var/lock/subsys +PID_DIR=/var/run/$NAME + +# Set file names +LOCK_FILE=$LOCK_DIR/$NAME +PID_FILE=$PID_DIR/$NAME.pid + +[ -e $PID_DIR ] || mkdir -p $PID_DIR + +PROG_OPTS="" + +start() { + echo -n "Starting ${NAME}: " + # check identity + curid="`id -u -n`" + if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then + echo "Must be run as root or $USER, but was run as $curid" + return 1 + fi + # Sets the maximum number of open file descriptors allowed. + ulimit -n $MAX_OPEN_FILES + curulimit="`ulimit -n`" + if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then + echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit" + return 1 + fi + + if [ "`id -u -n`" == root ] ; then + # Changes the owner of the lock, and the pid files to allow + # non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py. + touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE + touch $PID_FILE && chown $USER:$GROUP $PID_FILE + daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &" + else + # Don't have to change user. + daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &" + fi + retval=$? 
+ sleep 2 + echo + [ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE) + return $retval +} + +stop() { + echo -n "Stopping ${NAME}: " + killproc -p $PID_FILE $NAME + retval=$? + echo + # Non-root users don't have enough permission to remove pid and lock files. + # So, the opentsdb_restart.py cannot get rid of the files, and the command + # "service opentsdb status" will complain about the existing pid file. + # Makes the pid file empty. + echo > $PID_FILE + [ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE) + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + # run checks to determine if the service is running or use generic status + status -p $PID_FILE -l $LOCK_FILE $NAME +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec new file mode 100644 index 000000000000..fbcccc77557b --- /dev/null +++ b/packaging/rpm/tdengine.spec @@ -0,0 +1,121 @@ +%define homepath /usr/local/taos +%define cfg_install_dir /etc/taos + +Name: tdengine +Version: %{_version} +Release: 3%{?dist} +Summary: tdengine from taosdata +Group: Application/Database +License: AGPL +URL: www.taosdata.com + +#BuildRoot: %_topdir/BUILDROOT +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root + +#Prefix: /usr/local/taos + +#BuildRequires: +#Requires: + +%description +Big Data Platform Designed and Optimized for IoT + +#"prep" Nothing needs to be done +#%prep +#%setup -q +#%setup -T + +#"build" Nothing needs to be done +#%build +#%configure +#make %{?_smp_mflags} + +%install +#make install DESTDIR=%{buildroot} +rm -rf %{buildroot} + +echo topdir: %{_topdir} +echo version: %{_version} +echo buildroot: %{buildroot} + +# create install path, and cp file +mkdir -p %{buildroot}%{homepath}/bin +mkdir -p %{buildroot}%{homepath}/cfg +mkdir -p %{buildroot}%{homepath}/connector +mkdir -p %{buildroot}%{homepath}/driver +mkdir -p %{buildroot}%{homepath}/examples +mkdir -p %{buildroot}%{homepath}/include +mkdir -p %{buildroot}%{homepath}/init.d +mkdir -p %{buildroot}%{homepath}/script + +cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg +cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d +cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script +cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/script +cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/lib/libtaos.so %{buildroot}%{homepath}/driver +cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include +cp -r %{_compiledir}/../src/connector/grafana %{buildroot}%{homepath}/connector +cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector +cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector +cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector +cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples + +#Scripts executed before 
installation +%pre +function is_using_systemd() { + if pidof systemd &> /dev/null; then + return 0 + else + return 1 + fi +} + +# Stop the service if running +if pidof taosd &> /dev/null; then + if is_using_systemd; then + sudo systemctl stop taosd || : + else + sudo service taosd stop || : + fi + echo "Stop taosd service success!" + sleep 1 +fi + +# if taos.cfg already softlink, remove it +if [ -f %{cfg_install_dir}/taos.cfg ]; then + sudo rm -f %{homepath}/cfg/taos.cfg || : +fi + +#Scripts executed after installation +%post +cd %{homepath}/script +sudo ./post.sh + +# Scripts executed before uninstall +%preun +# only remove package to call preun.sh, not but update(2) +if [ $1 -eq 0 ];then + cd %{homepath}/script + sudo ./preun.sh +fi + +# Scripts executed after uninstall +%postun + +# clean build dir +%clean +rm -rf %{buildroot} + +#Specify the files to be packaged +%files +/* +#%doc + + +#Setting default permissions +%defattr (-,root,root,0755) +#%{prefix} + +#%changelog diff --git a/packaging/tools/check_os.sh b/packaging/tools/check_os.sh new file mode 100755 index 000000000000..92522f7b82e1 --- /dev/null +++ b/packaging/tools/check_os.sh @@ -0,0 +1,52 @@ +# /bin/bash +# +CSI=$(echo -e "\033[") +CRED="${CSI}1;31m" +CFAILURE="$CRED" +CEND="${CSI}0m" +if [ -n "$(grep 'Aliyun Linux release' /etc/issue)" -o -e /etc/redhat-release ]; then + OS=CentOS + [ -n "$(grep ' 7\.' /etc/redhat-release 2> /dev/null)" ] && CentOS_RHEL_version=7 + [ -n "$(grep ' 6\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release6 15' /etc/issue)" ] && CentOS_RHEL_version=6 + [ -n "$(grep ' 5\.' /etc/redhat-release 2> /dev/null)" -o -n "$(grep 'Aliyun Linux release5' /etc/issue)" ] && CentOS_RHEL_version=5 +elif [ -n "$(grep 'Amazon Linux AMI release' /etc/issue)" -o -e /etc/system-release ]; then + OS=CentOS + CentOS_RHEL_version=6 +elif [ -n "$(grep 'bian' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Debian" ]; then + OS=Debian + [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; } + Debian_version=$(lsb_release -sr | awk -F. '{print $1}') +elif [ -n "$(grep 'Deepin' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Deepin" ]; then + OS=Debian + [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; } + Debian_version=$(lsb_release -sr | awk -F. '{print $1}') +elif [ -n "$(grep 'Kali GNU/Linux Rolling' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Kali" ]; then + OS=Debian + [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; } + if [ -n "$(grep 'VERSION="2016.*"' /etc/os-release)" ]; then + Debian_version=8 + else + echo "${CFAILURE}Does not support this OS, Please contact the author! ${CEND}" + kill -9 $$ + fi +elif [ -n "$(grep 'Ubuntu' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == "Ubuntu" -o -n "$(grep 'Linux Mint' /etc/issue)" ]; then + OS=Ubuntu + [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; } + Ubuntu_version=$(lsb_release -sr | awk -F. '{print $1}') + [ -n "$(grep 'Linux Mint 18' /etc/issue)" ] && Ubuntu_version=16 +elif [ -n "$(grep 'elementary' /etc/issue)" -o "$(lsb_release -is 2>/dev/null)" == 'elementary' ]; then + OS=Ubuntu + [ ! -e "$(which lsb_release)" ] && { apt-get -y update; apt-get -y install lsb-release; clear; } + Ubuntu_version=16 +else + echo "${CFAILURE}Does not support this OS, Please contact the author! 
${CEND}" + kill -9 $$ +fi + +echo "${CFAILURE}${OS}${CEND}" +if [ "$OS" == 'CentOS' ]; then + echo ${CentOS_RHEL_version} +else + echo ${Ubuntu_version} +fi + diff --git a/packaging/tools/get_version.sh b/packaging/tools/get_version.sh new file mode 100755 index 000000000000..e0bf9d5055d5 --- /dev/null +++ b/packaging/tools/get_version.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# +# This file is used to install TAOS time-series database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +# set -x + +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -m "$0")) +verinfo=$(cat ${script_dir}/../../src/util/src/version.c | grep " version" | cut -d '"' -f2) +verinfo=$(echo $verinfo | tr "\n" " ") +len=$(echo ${#verinfo}) +len=$((len-1)) +retval=$(echo -ne ${verinfo:0:${len}}) +echo -ne $retval diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh new file mode 100755 index 000000000000..4d2a66d6058e --- /dev/null +++ b/packaging/tools/install.sh @@ -0,0 +1,381 @@ +#!/bin/bash +# +# This file is used to install TAOS time-series database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +#set -x + +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -m "$0")) +# Dynamic directory +data_dir="/var/lib/taos" +log_dir="/var/log/taos" + +data_link_dir="/usr/local/taos/data" +log_link_dir="/usr/local/taos/log" + +cfg_install_dir="/etc/taos" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/taos" + +# old bin dir +bin_dir="/usr/local/taos/bin" + +service_config_dir="/etc/systemd/system" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +function is_using_systemd() { + if pidof systemd &> /dev/null; then + return 0 + else + return 1 + fi +} + +if ! 
is_using_systemd; then + service_config_dir="/etc/init.d" +fi + +function install_main_path() { + #create install main dir and all sub dir + sudo rm -rf ${install_main_dir} || : + sudo mkdir -p ${install_main_dir} + sudo mkdir -p ${install_main_dir}/cfg + sudo mkdir -p ${install_main_dir}/bin + sudo mkdir -p ${install_main_dir}/connector + sudo mkdir -p ${install_main_dir}/driver + sudo mkdir -p ${install_main_dir}/examples + sudo mkdir -p ${install_main_dir}/include + sudo mkdir -p ${install_main_dir}/init.d +} + +function install_bin() { + # Remove links + sudo rm -f ${bin_link_dir}/taos || : + sudo rm -f ${bin_link_dir}/taosd || : + sudo rm -f ${bin_link_dir}/taosdump || : + sudo rm -f ${bin_link_dir}/rmtaos || : + + sudo cp -r ${script_dir}/bin/* ${install_main_dir}/bin && sudo chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/taos ] && sudo ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + [ -x ${install_main_dir}/bin/taosd ] && sudo ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : + [ -x ${install_main_dir}/bin/taosdump ] && sudo ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/remove.sh ] && sudo ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : +} + +function install_lib() { + # Remove links + sudo rm -f ${lib_link_dir}/libtaos.so || : + + sudo cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && sudo chmod 777 ${install_main_dir}/driver/* + + sudo ln -s ${install_main_dir}/driver/libtaos.* ${install_main_dir}/driver/libtaos.so.1 + sudo ln -s ${install_main_dir}/driver/libtaos.so.1 ${lib_link_dir}/libtaos.so +} + +function install_header() { + sudo rm -f ${inc_link_dir}/taos.h || : + sudo cp -f ${script_dir}/inc/* ${install_main_dir}/include && sudo chmod 644 ${install_main_dir}/include/* + sudo ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h +} + +function install_config() { + #sudo rm -f ${install_main_dir}/cfg/taos.cfg || : + + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then + sudo sudo mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/cfg/taos.cfg ] && sudo cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} + sudo chmod 644 ${cfg_install_dir}/* + fi + + sudo cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + sudo ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg +} + +function install_log() { + sudo rm -rf ${log_dir} || : + sudo mkdir -p ${log_dir} && sudo chmod 777 ${log_dir} + + sudo ln -s ${log_dir} ${install_main_dir}/log +} + +function install_data() { + sudo mkdir -p ${data_dir} + + sudo ln -s ${data_dir} ${install_main_dir}/data +} + +function install_connector() { + sudo cp -rf ${script_dir}/connector/* ${install_main_dir}/connector +} + +function install_examples() { + sudo cp -rf ${script_dir}/examples/* ${install_main_dir}/examples +} + +function clean_service_on_sysvinit() { + restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + if pidof taosd &> /dev/null; then + sudo service taosd stop || : + fi + sudo sed -i "\|${restart_config_str}|d" /etc/inittab || : + sudo rm -f ${service_config_dir}/taosd || : + sudo update-rc.d -f taosd remove || : + sudo init q || : +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + + sleep 1 + + # Install taosd service + sudo cp -f ${script_dir}/init.d/taosd ${install_main_dir}/init.d + sudo cp ${script_dir}/init.d/taosd ${service_config_dir} && sudo chmod a+x ${service_config_dir}/taosd + restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + + sudo grep -q -F "$restart_config_str" /etc/inittab || sudo bash -c "echo '${restart_config_str}' >> /etc/inittab" + # TODO: for centos, change here + sudo update-rc.d taosd defaults + # chkconfig mysqld on +} + +function clean_service_on_systemd() { + taosd_service_config="${service_config_dir}/taosd.service" + + if systemctl is-active --quiet taosd; then + echo "TDengine is running, stopping it..." 
+ sudo systemctl stop taosd &> /dev/null || echo &> /dev/null + fi + sudo systemctl disable taosd &> /dev/null || echo &> /dev/null + + sudo rm -f ${taosd_service_config} +} + +# taos:2345:respawn:/etc/init.d/taosd start + +function install_service_on_systemd() { + clean_service_on_systemd + + taosd_service_config="${service_config_dir}/taosd.service" + + sudo bash -c "echo '[Unit]' >> ${taosd_service_config}" + sudo bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" + sudo bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" + sudo bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + sudo bash -c "echo >> ${taosd_service_config}" + sudo bash -c "echo '[Service]' >> ${taosd_service_config}" + sudo bash -c "echo 'Type=simple' >> ${taosd_service_config}" + sudo bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" + sudo bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" + sudo bash -c "echo 'Restart=always' >> ${taosd_service_config}" + sudo bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" + sudo bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" + sudo bash -c "echo >> ${taosd_service_config}" + sudo bash -c "echo '[Install]' >> ${taosd_service_config}" + sudo bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" + sudo systemctl enable taosd +} + +function install_service() { + if is_using_systemd; then + install_service_on_systemd + else + install_service_on_sysvinit + fi +} + +vercomp () { + if [[ $1 == $2 ]]; then + return 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +function is_version_compatible() { + + curr_version=$(${bin_dir}/taosd -V | cut -d ' ' -f 1) + + min_compatible_version=$(${script_dir}/bin/taosd -V | cut -d ' ' -f 2) + + vercomp $curr_version $min_compatible_version + case $? in + 0) return 0;; + 1) return 0;; + 2) return 1;; + esac +} + +function update_TDengine() { + # Start to update + if [ ! -e taos.tar.gz ]; then + echo "File taos.tar.gz does not exist" + exit 1 + fi + tar -zxf taos.tar.gz + + # Check if version compatible + if ! 
is_version_compatible; then + echo -e "${RED}Version incompatible${NC}" + return 1 + fi + + echo -e "${GREEN}Start to update TDEngine...${NC}" + # Stop the service if running + if pidof taosd &> /dev/null; then + if is_using_systemd; then + sudo systemctl stop taosd || : + else + sudo service taosd stop || : + fi + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + install_connector + install_examples + if [ -z $1 ]; then + install_bin + install_service + install_config + + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if is_using_systemd; then + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo systemctl start taosd${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo update-rc.d taosd default ${RED} for the first time${NC}" + echo -e " : sudo service taosd start ${RED} after${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + else + install_bin $1 + install_config + + echo + echo -e "\033[44;32;1mTDengine client is updated successfully!${NC}" + fi + + rm -rf $(tar -tf taos.tar.gz) +} + +function install_TDengine() { + # Start to install + if [ ! -e taos.tar.gz ]; then + echo "File taos.tar.gz does not exist" + exit 1 + fi + tar -zxf taos.tar.gz + + echo -e "${GREEN}Start to install TDEngine...${NC}" + + install_main_path + + if [ -z $1 ]; then + install_data + fi + + install_log + install_header + install_lib + install_connector + install_examples + + if [ -z $1 ]; then # install service and client + # For installing new + install_bin + install_service + install_config + + # Ask if to start the service + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if is_using_systemd; then + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo systemctl start taosd${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo update-rc.d taosd default ${RED} for the first time${NC}" + echo -e " : sudo service taosd start ${RED} after${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + else # Only install client + install_bin + install_config + + echo + echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" + fi + + rm -rf $(tar -tf taos.tar.gz) +} + + +## ==============================Main program starts from here============================ +if [ -z $1 ]; then + # Install server and client + if [ -x ${bin_dir}/taosd ]; then + update_TDengine + else + install_TDengine + fi +else + # Only install client + if [ -x ${bin_dir}/taos ]; then + update_TDengine client + else + install_TDengine client + fi +fi diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh new file mode 100755 index 000000000000..39437c145e0c --- /dev/null +++ b/packaging/tools/install_client.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +script_dir=$(dirname $(readlink -m "$0")) +${script_dir}/install.sh client diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh new file mode 100755 index 000000000000..5176c76a831f --- /dev/null +++ b/packaging/tools/make_install.sh @@ -0,0 +1,293 @@ +#!/bin/bash +# +# This file is used to install TAOS time-series 
database on linux systems. The operating system +# is required to use systemd to manage services at boot + +set -e +# set -x + +# -----------------------Variables definition--------------------- +source_dir=$1 +binary_dir=$2 +script_dir=$(dirname $(readlink -m "$0")) +# Dynamic directory +data_dir="/var/lib/taos" +log_dir="/var/log/taos" + +data_link_dir="/usr/local/taos/data" +log_link_dir="/usr/local/taos/log" + +cfg_install_dir="/etc/taos" + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +inc_link_dir="/usr/include" + +#install main path +install_main_dir="/usr/local/taos" + +# old bin +bin_dir="/usr/local/taos/bin" + +service_config_dir="/etc/systemd/system" + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +function is_using_systemd() { + if pidof systemd &> /dev/null; then + return 0 + else + return 1 + fi +} + +if ! is_using_systemd; then + service_config_dir="/etc/init.d" +fi + +function install_main_path() { + #create install main dir and all sub dir + sudo rm -rf ${install_main_dir} || : + sudo mkdir -p ${install_main_dir} + sudo mkdir -p ${install_main_dir}/cfg + sudo mkdir -p ${install_main_dir}/bin + sudo mkdir -p ${install_main_dir}/connector + sudo mkdir -p ${install_main_dir}/driver + sudo mkdir -p ${install_main_dir}/examples + sudo mkdir -p ${install_main_dir}/include + sudo mkdir -p ${install_main_dir}/init.d +} + +function install_bin() { + # Remove links + sudo rm -f ${bin_link_dir}/taos || : + sudo rm -f ${bin_link_dir}/taosd || : + sudo rm -f ${bin_link_dir}/taosdump || : + sudo rm -f ${bin_link_dir}/rmtaos || : + + sudo cp -r ${binary_dir}/build/bin/taos ${install_main_dir}/bin + sudo cp -r ${binary_dir}/build/bin/taosd ${install_main_dir}/bin + sudo cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin + sudo cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + sudo chmod 0555 ${install_main_dir}/bin/* + + #Make link + [ -x ${install_main_dir}/bin/taos ] && sudo ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + [ -x ${install_main_dir}/bin/taosd ] && sudo ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : + [ -x ${install_main_dir}/bin/taosdump ] && sudo ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/remove.sh ] && sudo ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : +} + +function install_lib() { + # Remove links + sudo rm -f ${lib_link_dir}/libtaos.so || : + + versioninfo=$(${script_dir}/get_version.sh) + sudo cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && sudo chmod 777 ${install_main_dir}/driver/* + sudo ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${install_main_dir}/driver/libtaos.so.1 + sudo ln -sf ${install_main_dir}/driver/libtaos.so.1 ${lib_link_dir}/libtaos.so +} + +function install_header() { + + sudo rm -f ${inc_link_dir}/taos.h || : + sudo cp -f ${source_dir}/src/inc/taos.h ${install_main_dir}/include && sudo chmod 644 ${install_main_dir}/include/* + sudo ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h +} + +function install_config() { + #sudo rm -f ${install_main_dir}/cfg/taos.cfg || : + + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then + sudo sudo mkdir -p ${cfg_install_dir} + [ -f ${script_dir}/../cfg/taos.cfg ] && sudo cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir} + sudo chmod 644 ${cfg_install_dir}/* + fi + + sudo cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + sudo ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg +} + +function install_log() { + sudo rm -rf ${log_dir} || : + sudo mkdir -p ${log_dir} && sudo chmod 777 ${log_dir} + + sudo ln -s ${log_dir} ${install_main_dir}/log +} + +function install_data() { + sudo mkdir -p ${data_dir} + sudo ln -s ${data_dir} ${install_main_dir}/data +} + +function install_connector() { + sudo cp -rf ${source_dir}/src/connector/grafana ${install_main_dir}/connector + sudo cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector + sudo cp -rf ${source_dir}/src/connector/go ${install_main_dir}/connector + + sudo cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && sudo chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null +} + +function install_examples() { + sudo cp -rf ${source_dir}/tests/examples/* ${install_main_dir}/examples +} + +function clean_service_on_sysvinit() { + restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + if pidof taosd &> /dev/null; then + sudo service taosd stop || : + fi + sudo sed -i "\|${restart_config_str}|d" /etc/inittab || : + sudo rm -f ${service_config_dir}/taosd || : + sudo update-rc.d -f taosd remove || : + sudo init q || : +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + + sleep 1 + + # Install taosd service + sudo cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && sudo chmod a+x ${service_config_dir}/taosd + restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + + sudo grep -q -F "$restart_config_str" /etc/inittab || sudo bash -c "echo '${restart_config_str}' >> /etc/inittab" + # TODO: for centos, change here + sudo update-rc.d taosd defaults + # chkconfig mysqld on +} + +function clean_service_on_systemd() { + taosd_service_config="${service_config_dir}/taosd.service" + + if systemctl is-active --quiet taosd; then + echo "TDengine is running, stopping it..." 
+ sudo systemctl stop taosd &> /dev/null || echo &> /dev/null + fi + sudo systemctl disable taosd &> /dev/null || echo &> /dev/null + + sudo rm -f ${taosd_service_config} +} + +# taos:2345:respawn:/etc/init.d/taosd start + +function install_service_on_systemd() { + clean_service_on_systemd + + taosd_service_config="${service_config_dir}/taosd.service" + + sudo bash -c "echo '[Unit]' >> ${taosd_service_config}" + sudo bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" + sudo bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" + sudo bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + sudo bash -c "echo >> ${taosd_service_config}" + sudo bash -c "echo '[Service]' >> ${taosd_service_config}" + sudo bash -c "echo 'Type=simple' >> ${taosd_service_config}" + sudo bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" + sudo bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" + sudo bash -c "echo 'Restart=always' >> ${taosd_service_config}" + sudo bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" + sudo bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" + sudo bash -c "echo >> ${taosd_service_config}" + sudo bash -c "echo '[Install]' >> ${taosd_service_config}" + sudo bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" + sudo systemctl enable taosd +} + +function install_service() { + if is_using_systemd; then + install_service_on_systemd + else + install_service_on_sysvinit + fi +} + +function update_TDengine() { + echo -e "${GREEN}Start to update TDEngine...${NC}" + # Stop the service if running + if pidof taosd &> /dev/null; then + if is_using_systemd; then + sudo systemctl stop taosd || : + else + sudo service taosd stop || : + fi + sleep 1 + fi + + install_main_path + + install_log + install_header + install_lib + install_bin + install_service + install_config + install_connector + install_examples + + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if is_using_systemd; then + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo systemctl start taosd${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo update-rc.d taosd default ${RED} for the first time${NC}" + echo -e " : sudo service taosd start ${RED} after${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" +} + +function install_TDengine() { + # Start to install + echo -e "${GREEN}Start to install TDEngine...${NC}" + + install_main_path + install_data + install_log + install_header + install_bin + install_lib + install_service + install_config + install_connector + install_examples + + # Ask if to start the service + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if is_using_systemd; then + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo systemctl start taosd${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo update-rc.d taosd default ${RED} for 
the first time${NC}" + echo -e " : sudo service taosd start ${RED} after${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" +} + +## ==============================Main program starts from here============================ +echo source directory: $1 +echo binary directory: $2 +if [ -x ${bin_dir}/taosd ]; then + update_TDengine +else + install_TDengine +fi diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh new file mode 100755 index 000000000000..81a0b8382576 --- /dev/null +++ b/packaging/tools/makepkg.sh @@ -0,0 +1,81 @@ +#!/bin/bash +# +# Generate deb package for other os system (no unbutu or centos) + +curr_dir=$(pwd) +compile_dir=$1 +version=$2 +build_time=$3 + +script_dir="$(dirname $(readlink -f $0))" +top_dir="$(readlink -m ${script_dir}/../..)" + +# create compressed install file. +build_dir="${compile_dir}/build" +code_dir="${top_dir}/src" +release_dir="${top_dir}/release" + +package_name='linux' +install_dir="${release_dir}/taos-${version}-${package_name}-$(echo ${build_time}| tr ': ' -)" + +# Directories and files. +bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove.sh" +versioninfo=$(${script_dir}/get_version.sh) +lib_files="${build_dir}/lib/libtaos.so.${versioninfo}" +header_files="${code_dir}/inc/taos.h" +cfg_files="${top_dir}/packaging/cfg/*.cfg" +install_files="${script_dir}/install.sh ${script_dir}/install_client.sh" + +# Init file +#init_dir=${script_dir}/deb +#if [ $package_type = "centos" ]; then +# init_dir=${script_dir}/rpm +#fi +#init_files=${init_dir}/taosd +# temp use rpm's taosd. TODO: later modify according to os type +init_files=${script_dir}/../rpm/taosd + +# make directories. 
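For orientation, the mkdir/cp steps that follow assemble a staging tree along these lines before compression (a sketch reconstructed from the variables above; the inner taos.tar.gz is produced first with --remove-files, then the whole directory is packed into the release tarball):

```shell
# ${install_dir}/                  # taos-<version>-linux-<build time>
# ├── taos.tar.gz                  # inc/, cfg/, bin/, init.d/ packed first
# ├── install.sh                   # installer entry points copied afterwards
# ├── install_client.sh
# ├── examples/                    # c, JDBC, matlab, python, R, go samples
# ├── driver/                      # libtaos.so.<version>
# ├── connector/                   # grafana, python, go, JDBC jars
# └── release_note
# Finally: tar -zcf "$(basename ${install_dir}).tar.gz" is created under ${release_dir}
```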
+mkdir -p ${install_dir} +mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc +mkdir -p ${install_dir}/cfg && cp ${cfg_files} ${install_dir}/cfg +mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* +mkdir -p ${install_dir}/init.d && cp ${init_files} ${install_dir}/init.d + +cd ${install_dir} +tar -zcv -f taos.tar.gz * --remove-files || : + +cd ${curr_dir} +cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* + +# Copy example code +mkdir -p ${install_dir}/examples +cp -r ${top_dir}/tests/examples/c ${install_dir}/examples +cp -r ${top_dir}/tests/examples/JDBC ${install_dir}/examples +cp -r ${top_dir}/tests/examples/matlab ${install_dir}/examples +cp -r ${top_dir}/tests/examples/python ${install_dir}/examples +cp -r ${top_dir}/tests/examples/R ${install_dir}/examples +cp -r ${top_dir}/tests/examples/go ${install_dir}/examples + +# Copy driver +mkdir -p ${install_dir}/driver +cp ${lib_files} ${install_dir}/driver + +# Copy connector +connector_dir="${code_dir}/connector" +mkdir -p ${install_dir}/connector +cp -r ${connector_dir}/grafana ${install_dir}/connector/ +cp -r ${connector_dir}/python ${install_dir}/connector/ +cp -r ${connector_dir}/go ${install_dir}/connector +cp ${build_dir}/lib/*.jar ${install_dir}/connector + + +# Copy release note +cp ${script_dir}/release_note ${install_dir} + +# exit 1 + +cd ${release_dir} +tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files + +cd ${curr_dir} diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh new file mode 100755 index 000000000000..4483f1c7b089 --- /dev/null +++ b/packaging/tools/post.sh @@ -0,0 +1,203 @@ +#!/bin/bash +# +# This file is used to install tdengine rpm package on centos systems. The operating system +# is required to use systemd to manage services at boot +#set -x +# -----------------------Variables definition--------------------- +script_dir=$(dirname $(readlink -m "$0")) +# Dynamic directory +data_dir="/var/lib/taos" +log_dir="/var/log/taos" +data_link_dir="/usr/local/taos/data" +log_link_dir="/usr/local/taos/log" + +# static directory +cfg_dir="/usr/local/taos/cfg" +bin_dir="/usr/local/taos/bin" +lib_dir="/usr/local/taos/driver" +init_d_dir="/usr/local/taos/init.d" +inc_dir="/usr/local/taos/include" + +cfg_install_dir="/etc/taos" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +inc_link_dir="/usr/include" + +service_config_dir="/etc/systemd/system" + + +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' + +function is_using_systemd() { + if pidof systemd &> /dev/null; then + return 0 + else + return 1 + fi +} + +if ! 
is_using_systemd; then + service_config_dir="/etc/init.d" +fi + +function install_include() { + sudo rm -f ${inc_link_dir}/taos.h || : + sudo ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h +} + +function install_lib() { + sudo rm -f ${lib_link_dir}/libtaos.so || : + sudo ln -s ${lib_dir}/libtaos.so ${lib_link_dir}/libtaos.so + #sudo ln -s ${lib_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so.1 || : +} + +function install_bin() { + # Remove links + sudo rm -f ${bin_link_dir}/taos || : + sudo rm -f ${bin_link_dir}/taosd || : + sudo rm -f ${bin_link_dir}/taosdump || : + sudo rm -f ${bin_link_dir}/rmtaos || : + + sudo chmod 0555 ${bin_dir}/* + + #Make link + [ -x ${bin_dir}/taos ] && sudo ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : + [ -x ${bin_dir}/taosd ] && sudo ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : +# [ -x ${bin_dir}/taosdump ] && sudo ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : +# [ -x ${bin_dir}/remove.sh ] && sudo ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || : +} + +function install_config() { + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + sudo sudo mkdir -p ${cfg_install_dir} + [ -f ${cfg_dir}/taos.cfg ] && sudo cp ${cfg_dir}/taos.cfg ${cfg_install_dir} + sudo chmod 644 ${cfg_install_dir}/* + fi + + sudo mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.org + sudo ln -s ${cfg_install_dir}/taos.cfg ${cfg_dir} +} + +function clean_service_on_sysvinit() { + restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + #if pidof taosd &> /dev/null; then + # sudo service taosd stop || : + #fi + sudo sed -i "\|${restart_config_str}|d" /etc/inittab || : + sudo rm -f ${service_config_dir}/taosd || : + sudo update-rc.d -f taosd remove || : + sudo init q || : +} + +function install_service_on_sysvinit() { + clean_service_on_sysvinit + + sleep 1 + + # Install taosd service + sudo cp %{init_d_dir}/taosd ${service_config_dir} && sudo chmod a+x ${service_config_dir}/taosd + + restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" + + sudo grep -q -F "$restart_config_str" /etc/inittab || sudo bash -c "echo '${restart_config_str}' >> /etc/inittab" + # TODO: for centos, change here + sudo update-rc.d taosd defaults + # chkconfig mysqld on +} + +function clean_service_on_systemd() { + taosd_service_config="${service_config_dir}/taosd.service" + + # taosd service already is stoped before install + #if systemctl is-active --quiet taosd; then + # echo "TDengine is running, stopping it..." 
+ # sudo systemctl stop taosd &> /dev/null || echo &> /dev/null + #fi + sudo systemctl disable taosd &> /dev/null || echo &> /dev/null + + sudo rm -f ${taosd_service_config} +} + +# taos:2345:respawn:/etc/init.d/taosd start + +function install_service_on_systemd() { + clean_service_on_systemd + + taosd_service_config="${service_config_dir}/taosd.service" + + sudo bash -c "echo '[Unit]' >> ${taosd_service_config}" + sudo bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" + sudo bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" + sudo bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + sudo bash -c "echo >> ${taosd_service_config}" + sudo bash -c "echo '[Service]' >> ${taosd_service_config}" + sudo bash -c "echo 'Type=simple' >> ${taosd_service_config}" + sudo bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" + sudo bash -c "echo 'TimeoutStartSec=0' >> ${taosd_service_config}" + sudo bash -c "echo 'StandardOutput=null' >> ${taosd_service_config}" + sudo bash -c "echo 'Restart=always' >> ${taosd_service_config}" + sudo bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" + sudo bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" + sudo bash -c "echo >> ${taosd_service_config}" + sudo bash -c "echo '[Install]' >> ${taosd_service_config}" + sudo bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" + sudo systemctl enable taosd +} + +function install_service() { + if is_using_systemd; then + install_service_on_systemd + else + install_service_on_sysvinit + fi +} + +function install_TDengine() { + echo -e "${GREEN}Start to install TDEngine...${NC}" + + #install log and data dir , then ln to /usr/local/taos + sudo mkdir -p ${log_dir} && sudo chmod 777 ${log_dir} + sudo mkdir -p ${data_dir} + + sudo rm -rf ${log_link_dir} || : + sudo rm -rf ${data_link_dir} || : + + sudo ln -s ${log_dir} ${log_link_dir} || : + sudo ln -s ${data_dir} ${data_link_dir} || : + + # Install include, lib, binary and service + install_include + install_lib + install_bin + install_service + install_config + + # Ask if to start the service + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if is_using_systemd; then + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo systemctl start taosd${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: sudo update-rc.d taosd default ${RED} for the first time${NC}" + echo -e " : sudo service taosd start ${RED} after${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" +} + + +## ==============================Main program starts from here============================ +install_TDengine diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh new file mode 100755 index 000000000000..387829bb4432 --- /dev/null +++ b/packaging/tools/preun.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# +# Script to stop the service and uninstall TSDB + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +inc_link_dir="/usr/include" + +data_link_dir="/usr/local/taos/data" 
+log_link_dir="/usr/local/taos/log"
+cfg_link_dir="/usr/local/taos/cfg"
+
+service_config_dir="/etc/systemd/system"
+taos_service_name="taosd"
+
+function is_using_systemd() {
+    if pidof systemd &> /dev/null; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+if ! is_using_systemd; then
+    service_config_dir="/etc/init.d"
+fi
+
+function clean_service_on_systemd() {
+    taosd_service_config="${service_config_dir}/${taos_service_name}.service"
+
+    if systemctl is-active --quiet ${taos_service_name}; then
+        echo "TDengine taosd is running, stopping it..."
+        sudo systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
+    fi
+    sudo systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
+
+    sudo rm -f ${taosd_service_config}
+}
+
+function clean_service_on_sysvinit() {
+    restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+
+    if pidof taosd &> /dev/null; then
+        echo "TDengine taosd is running, stopping it..."
+        sudo service taosd stop || :
+    fi
+
+    sudo sed -i "\|${restart_config_str}|d" /etc/inittab || :
+    sudo rm -f ${service_config_dir}/taosd || :
+    sudo update-rc.d -f taosd remove || :
+    sudo init q || :
+}
+
+function clean_service() {
+    if is_using_systemd; then
+        clean_service_on_systemd
+    else
+        clean_service_on_sysvinit
+    fi
+}
+
+# Stop service and disable booting start.
+clean_service
+
+# Remove all links
+sudo rm -f ${bin_link_dir}/taos || :
+sudo rm -f ${bin_link_dir}/taosd || :
+sudo rm -f ${cfg_link_dir}/taos.cfg || :
+sudo rm -f ${inc_link_dir}/taos.h || :
+sudo rm -f ${lib_link_dir}/libtaos.so || :
+
+sudo rm -f ${log_link_dir} || :
+sudo rm -f ${data_link_dir} || :
+
+echo -e "${GREEN}TDengine is removed successfully!${NC}"
diff --git a/packaging/tools/release_note b/packaging/tools/release_note
new file mode 100644
index 000000000000..3a3cd81ca91b
--- /dev/null
+++ b/packaging/tools/release_note
@@ -0,0 +1,125 @@
+taos-1.5.2.6 (Released on 2019-05-13)
+Bug fixed:
+  - Nchar strings sometimes were wrongly truncated on Windows
+  - Importing data from file throws an error of "invalid SQL"
+
+taos-1.5.2.5 (Released on 2019-05-13)
+Bug fixed:
+  - Long timespan data import sometimes affects query result
+  - Synchronization of cluster dnodes worked incorrectly when importing
+
+taos-1.5.2.4 (Released on 2019-05-10)
+New Features:
+  - Optimized Windows client installation: now users don't need to copy taos.dll manually
+  - Changed the priority of taos.cfg and JDBC URL: parameters in the JDBC URL now have a higher priority than parameters in taos.cfg
+Bug fixed:
+  - Expired data files were not deleted correctly
+  - Occasionally importing returned "affected rows" larger than 0, but 0 rows were actually written into the db
+  - Commit log is occupied by too many import-to-file requests, which blocked further data importing
+  - Cloud service shows a wrong number of available days with current balance
+  - Other minor issues
+
+taos-1.5.1 (Released on 2019-04-09)
+New Features:
+  - Maximum number of rows returned by "top/bottom" methods increased from 20 to 100
+  - Improved the performance of "first/last" methods
+  - Increased system stability
+Bug fixed:
+  - Connection failure when querying huge STables through TCP
+  - The primary timestamp is occasionally returned as NULL in some queries
+  - Operation failure when updating a tag value to NULL
+  - Stream calculation couldn't start on certain occasions
+
+taos-1.5.0 (Released on 2019-03-11)
+New Features:
+  - New syntax to automatically create tables when inserting values into non-existing tables
+  - New 
syntax "slimit/soffset" to pagenate groups in a query result set + - Support "top/bottom" queries on a supertable + - High performance statistic aggregation function "apercentile" + - Remove "first_t/last_t" functions; improve the performance of "first/last" function + - Add pre-aggregation for bool type values + - Supports fixed-length streaming computation, i.e. users may define an end time for a stream + - New JAVA API for SQL subscription, supports table/supertable/SQL query subscription +Bug fixed: + - Data file broken issue when frequently using "import" + - Using "spread" on a super table may return negative values + - RPC bug that random network packets might cause the RPC module to crush + +taos-1.4.15 (Released on 2019-01-23) +New Features: + - JDBC Driver now supports configuring timezone, locale, cfgdir in JDBC url + - A new API is added to validate if a table creation sql statement is correct in syntax without actually creating that table +Bugs Fixed: + - "select last(*) from STable" sometimest returned incorrect number of rows + - JDBC driver method ResultSetMetaData.getColumnClassName() returned wrong values. + - Web shell automatically changed query string to lower case + +taos-1.4.14 (Released on 2018-12-22) +New Features: + - C Driver support for integration with Python + - JDBC Driver support for integration with R and MATLAB + +taos-1.4.13 (Released on 2018-12-14) +Bugs Fixed: + - Clients failed to connect to server due to unexpected and invalid packets recieved by the server. +Features Added: + - Add support to HikariCP in TSDB JDBC driver. + +taos-1.4.12 (Released on 2018-12-08) +Bugs Fixed: + - Querying data while inserting into the database might return incomplete resultsets. +Features Added: + - A new python driver is added. + - Increased system stability. + - Changed meaning of database configuration paramerter 'ablocks'. 'ablocks' used to refer to the number of total cache blocks in memory, now it refers to average number of cache blocks for each table in memory. + +taos-1.4.11 (Released on 2018-11-23) +Bugs Fixed: + - Thread memory leaking during high-frequency committing. + - Master dnode selection failure caused by accidental network issues. +Features Added: + - Change keyword "metrics" to "stables", i.e. supertables; the previous query "show metrics" is now changed to "show stables". + - Add an error message mechanism in C# driver. An error with message "Failed to connect to server" is thrown when fetching data experienced a network connection interruption during data transmitting. + +taos-1.4.10 (Released on 2018-11-13) +Bugs Fixed: + - Taosdump failed while exporting extremely large datasets to a .sql file. + - Commit status did not change correctly if the last commit was triggered by commit threshold time (ctime) and no more new data was written to DB during the next ctime period. +Features Added: + - Support importing historical data from Telegraf interface. + - Support MyBatis framework in TSDB JDBC Driver. + - Change result set row indexing in JDBC Driver. Result set row indexes now starts from 1 instead of 0. + +taos-1.4.9 (Released on 2018-11-02) +Bugs Fixed: + - Dumping data using UTF-8 format in client shell failed. + - Tag query failed using C# Driver. + - Committing data to disk failed if DB files were corrupted. + - Continuously pressing Ctrl+c in client shell for multiple times produced a segmentation fault. +Features Added: + - Changed the display pattern in shell for taosdump. 
+ - Add a check to the status of an existing resultset before firing a new query in a single JDBC connection. A connection can only have a single open resultset, and the resultset must be closed before one can execute new queries. + + +taos-1.4.7 (Released on 2018-10-25) +Bug Fixed: + - UTF-8 encoding in JDBC Driver did not give the correct Chinese characters. + - Fix crash error when where clause is too long. +Features Added: + - Add check on database properties, force ablocks to be at least (4 * tables) in a vnode. + - Check if pVgroup is empty in sdb. + +taos-1.4.6 (Released on 2018-10-21) +Bug Fixed: + - Fix wrong symbol addition while export csv file. +Features Added: + - Update grafana plugins. + - Update python drivers. + - Add error code explanation in JDBC Driver. + - Prohibit login while the version of server and client are not match. + +taos-1.4.5 (Released on 2018-10-17) +Bug Fixed: + - Fix HTTP request truncation bug in Telegraf interface. +Features Added: + - Support nchar and null object in JDBC Driver. diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh new file mode 100755 index 000000000000..0dc9b9225874 --- /dev/null +++ b/packaging/tools/remove.sh @@ -0,0 +1,173 @@ +#!/bin/bash +# +# Script to stop the service and uninstall TSDB + +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' + +data_dir="/var/lib/taos" +log_dir="/var/log/taos" + +#install main path +install_main_dir="/usr/local/taos" + +data_link_dir="/usr/local/taos/data" +log_link_dir="/usr/local/taos/log" + +cfg_link_dir="/usr/local/taos/cfg" +bin_link_dir="/usr/bin" +lib_link_dir="/usr/lib" +inc_link_dir="/usr/include" + +header_dir="/usr/local/include/taos" +cfg_dir="/etc/taos" +bin_dir="/usr/local/bin/taos" +lib_dir="/usr/local/lib/taos" +link_dir="/usr/bin" +service_config_dir="/etc/systemd/system" +taos_service_name="taosd" +nginx_service_name="tdnginx" + +function is_using_systemd() { + if pidof systemd &> /dev/null; then + return 0 + else + return 1 + fi +} + +if ! is_using_systemd; then + service_config_dir="/etc/init.d" +fi + +function clean_bin() { + # Remove link + sudo rm -f ${bin_link_dir}/taos || : + sudo rm -f ${bin_link_dir}/taosd || : + sudo rm -f ${bin_link_dir}/taosdump || : + sudo rm -f ${bin_link_dir}/rmtaos || : + + # Remove binary files + #sudo rm -rf ${bin_dir} || : +} +function clean_lib() { + # Remove link + sudo rm -f ${lib_link_dir}/libtaos.so || : + + #sudo rm -f /usr/lib/libtaos.so || : + #sudo rm -rf ${lib_dir} || : +} + +function clean_header() { + # Remove link + sudo rm -f ${inc_link_dir}/taos.h || : + + #sudo rm -rf ${header_dir} +} + +function clean_config() { + # Remove link + sudo rm -f ${cfg_link_dir}/taos.cfg || : + #sudo rm -rf ${cfg_link_dir} || : +} + +function clean_log() { + if grep -e '^\s*logDir.*$' ${cfg_dir}/taos.cfg &> /dev/null; then + config_log_dir=$(cut -d ' ' -f2 <<< $(grep -e '^\s*logDir.*$' ${cfg_dir}/taos.cfg)) + # echo "Removing log dir ${config_log_dir}......" + sudo rm -rf ${config_log_dir} || : + fi + + # Remove link + sudo rm -rf ${log_link_dir} || : + sudo rm -rf ${log_dir} || : +} + +function clean_service_on_systemd() { + taosd_service_config="${service_config_dir}/${taos_service_name}.service" + + if systemctl is-active --quiet ${taos_service_name}; then + echo "TDengine taosd is running, stopping it..." 
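The clean_log function above extracts the logDir value from taos.cfg with a grep/cut pair, and the same pattern is repeated for dataDir further down in this script. A small helper capturing that lookup, as a sketch only (get_cfg_value is a hypothetical name, not something the packaging scripts define):

# Sketch: read a single value (e.g. logDir or dataDir) from taos.cfg.
# Assumes the key and its value are separated by whitespace, as in the grep/cut calls in this script.
function get_cfg_value() {
    local key="$1"
    grep -E "^[[:space:]]*${key}" ${cfg_dir}/taos.cfg 2> /dev/null | awk '{print $2}'
}

# Hypothetical usage:
#   config_log_dir=$(get_cfg_value logDir)
#   config_data_dir=$(get_cfg_value dataDir)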
+        sudo systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
+    fi
+    sudo systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
+
+    sudo rm -f ${taosd_service_config}
+}
+
+function clean_service_on_sysvinit() {
+    restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
+
+    if pidof taosd &> /dev/null; then
+        echo "TDengine taosd is running, stopping it..."
+        sudo service taosd stop || :
+    fi
+
+    sudo sed -i "\|${restart_config_str}|d" /etc/inittab || :
+    sudo rm -f ${service_config_dir}/taosd || :
+    sudo update-rc.d -f taosd remove || :
+    sudo init q || :
+}
+
+function clean_service() {
+    if is_using_systemd; then
+        clean_service_on_systemd
+    else
+        clean_service_on_sysvinit
+    fi
+}
+
+isAll="true"
+if ! type taosd &> /dev/null; then
+    isAll="false"
+fi
+
+config_data_dir=''
+if grep -e '^\s*dataDir.*$' ${cfg_dir}/taos.cfg &> /dev/null; then
+    config_data_dir=$(cut -d ' ' -f2 <<< $(grep -e '^\s*dataDir.*$' ${cfg_dir}/taos.cfg))
+fi
+
+# Stop service and disable booting start.
+clean_service
+# Remove binary file and links
+clean_bin
+# Remove header file.
+clean_header
+# Remove lib file
+clean_lib
+# Remove log directory
+clean_log
+# Remove configuration file
+clean_config
+# Remove data directory
+sudo rm -rf ${data_link_dir} || :
+
+[ "$isAll" = "false" ] && exit 0 || :
+echo -e -n "${RED}Do you want to delete data stored in TDengine? [y/N]: ${NC}"
+read is_delete
+while true; do
+    if [ "${is_delete}" = "y" ] || [ "${is_delete}" = "Y" ]; then
+        sudo rm -rf ${data_dir}
+        # echo "Removing data file ${config_data_dir}..."
+        [ -n "${config_data_dir}" ] && sudo rm -rf ${config_data_dir}
+        break
+    elif [ "${is_delete}" = "n" ] || [ "${is_delete}" = "N" ]; then
+        break
+    else
+        read -p "Please enter 'y' or 'n': " is_delete
+    fi
+done
+
+sudo rm -rf ${install_main_dir}
+
+osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+if echo $osinfo | grep -qwi "ubuntu" ; then
+#  echo "this is ubuntu system"
+    sudo rm -f /var/lib/dpkg/info/tdengine* || :
+elif echo $osinfo | grep -qwi "centos" ; then
+    echo "this is centos system"
+    sudo rpm -e --noscripts tdengine || :
+fi
+
+echo -e "${GREEN}TDengine is removed successfully!${NC}"
diff --git a/packaging/tools/repair_link.sh b/packaging/tools/repair_link.sh
new file mode 100755
index 000000000000..42b1082a9e84
--- /dev/null
+++ b/packaging/tools/repair_link.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# This script is used to repair links when you want to move TDengine
+# data to other places and still access the data.
+
+# Read link path
+read -p "Please enter link directory such as /var/lib/taos/tsdb: " linkDir
+
+while true; do
+    if [ ! -d $linkDir ]; then
+        read -p "Path does not exist, please enter the correct link path:" linkDir
+        continue
+    fi
+    break
+done
+
+declare -A dirHash
+
+for linkFile in $(find -L $linkDir -xtype l); do
+    targetFile=$(readlink -m $linkFile)
+    echo "targetFile: ${targetFile}"
+    # TODO : Extract directory part and basename part
+    dirName=$(dirname $(dirname ${targetFile}))
+    baseName=$(basename $(dirname ${targetFile}))/$(basename ${targetFile})
+
+    # TODO :
+    newDir="${dirHash["$dirName"]}"
+    if [ -z "${dirHash["$dirName"]}" ]; then
+        read -p "Please enter the directory to replace ${dirName}:" newDir
+
+        read -p "Do you want to replace all [y/N]?" 
replcaceAll + if [[ ( "${replcaceAll}" == "y") || ( "${replcaceAll}" == "Y") ]]; then + dirHash["$dirName"]="$newDir" + fi + fi + + # Replcace the file + ln -sf "${newDir}/${baseName}" "${linkFile}" +done diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 000000000000..3ed07b45ea5b --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,17 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +SET(PRJ_HEADER_PATH ${CMAKE_CURRENT_SOURCE_DIR}/inc) + +ADD_SUBDIRECTORY(util) +ADD_SUBDIRECTORY(rpc) +ADD_SUBDIRECTORY(client) +ADD_SUBDIRECTORY(sdb) +ADD_SUBDIRECTORY(modules/http) +ADD_SUBDIRECTORY(modules/monitor) +ADD_SUBDIRECTORY(system) +ADD_SUBDIRECTORY(kit/shell) +ADD_SUBDIRECTORY(kit/taosBenchmark) +ADD_SUBDIRECTORY(kit/taosdump) +ADD_SUBDIRECTORY(connector/jdbc) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt new file mode 100755 index 000000000000..6de31a4ff5be --- /dev/null +++ b/src/client/CMakeLists.txt @@ -0,0 +1,30 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc ./jni ./jni/jni) + +# generate dynamic library (*.so) +ADD_LIBRARY(taos SHARED ${SRC}) +ADD_LIBRARY(taos_static STATIC ${SRC}) +TARGET_LINK_LIBRARIES(taos_static trpc tutil pthread m rt) +TARGET_LINK_LIBRARIES(taos trpc tutil pthread m rt) + +# set the static lib name +SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos") + +# enable static lib and so exists +SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) +SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1) + +#set version of .so +#VERSION so version +#SOVERSION api version + +execute_process(COMMAND ${PROJECT_SOURCE_DIR}/../../packaging/tools/get_version.sh + OUTPUT_VARIABLE + VERSION_INFO) +MESSAGE(STATUS "build version ${VERSION_INFO}") + +SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1) \ No newline at end of file diff --git a/src/client/inc/tscCache.h b/src/client/inc/tscCache.h new file mode 100644 index 000000000000..096a6618f6e9 --- /dev/null +++ b/src/client/inc/tscCache.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSCCACHE_H +#define TDENGINE_TSCCACHE_H + +#ifdef __cplusplus +extern "C" { +#endif + +void *taosOpenConnCache(int maxSessions, void (*cleanFp)(void *), void *tmrCtrl, int64_t keepTimer); + +void taosCloseConnCache(void *handle); + +void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, short port, char *user); + +void *taosGetConnFromCache(void *handle, uint32_t ip, short port, char *user); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCACHE_H diff --git a/src/client/inc/tscProfile.h b/src/client/inc/tscProfile.h new file mode 100644 index 000000000000..16b9efac38aa --- /dev/null +++ b/src/client/inc/tscProfile.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSCPROFILE_H +#define TDENGINE_TSCPROFILE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tsclient.h" + +void tscAddIntoSqlList(SSqlObj *pSql); +void tscRemoveFromSqlList(SSqlObj *pSql); +void tscAddIntoStreamList(SSqlStream *pStream); +void tscRemoveFromStreamList(SSqlStream *pStream, SSqlObj *pSqlObj); +char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj); +void tscKillQuery(STscObj *pObj, uint32_t killId); +void tscKillStream(STscObj *pObj, uint32_t killId); +void tscKillConnection(STscObj *pObj); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCPROFILE_H diff --git a/src/client/inc/tscSecondaryMerge.h b/src/client/inc/tscSecondaryMerge.h new file mode 100644 index 000000000000..09e23adcb122 --- /dev/null +++ b/src/client/inc/tscSecondaryMerge.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSCSECONARYMERGE_H +#define TDENGINE_TSCSECONARYMERGE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "textbuffer.h" +#include "tinterpolation.h" +#include "tlosertree.h" +#include "tsclient.h" + +#define MAX_NUM_OF_SUBQUERY_RETRY 3 + +/* + * @version 0.1 + * @date 2018/01/05 + * @author liaohj + * management of client-side reducer for metric query + */ + +struct SQLFunctionCtx; + +typedef struct SLocalDataSrc { + tExtMemBuffer *pMemBuffer; + int32_t flushoutIdx; + int32_t pageId; + int32_t rowIdx; + tFilePage filePage; +} SLocalDataSrc; + +enum { + TSC_LOCALREDUCE_READY = 0x0, + TSC_LOCALREDUCE_IN_PROGRESS = 0x1, + TSC_LOCALREDUCE_TOBE_FREED = 0x2, +}; + +typedef struct SLocalReducer { + SLocalDataSrc **pLocalDataSrc; + int32_t numOfBuffer; + int32_t numOfCompleted; + + int32_t numOfVnode; + + SLoserTreeInfo *pLoserTree; + char * prevRowOfInput; + + tFilePage *pResultBuf; + int32_t nResultBufSize; + + char *pBufForInterpo; // intermediate buffer for interpolation + + tFilePage *pTempBuffer; + + struct SQLFunctionCtx *pCtx; + + int32_t rowSize; // size of each intermediate result. 
+ int32_t status; // denote it is in reduce process, in reduce process, it + // cannot be released + bool hasPrevRow; + bool hasUnprocessedRow; + + tOrderDescriptor *pDesc; + tColModel * resColModel; + + tExtMemBuffer ** pExtMemBuffer; // disk-based buffer + SInterpolationInfo interpolationInfo; // interpolation support structure + + char *pFinalRes; // result data after interpo + + tFilePage *discardData; + bool discard; + + int32_t offset; +} SLocalReducer; + +typedef struct SRetrieveSupport { + tExtMemBuffer ** pExtMemBuffer; // for build loser tree + tOrderDescriptor *pOrderDescriptor; + tColModel * pFinalColModel; // colModel for final result + + /* + * shared by all subqueries + * It is the number of completed retrieval subquery. + * once this value equals to numOfVnodes, all retrieval are completed. + * Local merge is launched. + */ + int32_t *numOfFinished; + int32_t numOfVnodes; // total number of vnode + int32_t vnodeIdx; // index of current vnode in vnode list + + /* + * shared by all subqueries + * denote the status of query on vnode, if code!=0, all following + * retrieval on vnode are aborted. + */ + int32_t *code; + + SSqlObj * pParentSqlObj; + tFilePage *localBuffer; // temp buffer, there is a buffer for each vnode to + // save data + uint64_t *numOfTotalRetrievedPoints; // total number of points in this query + // retrieved from server + + uint32_t numOfRetry; // record the number of retry times + pthread_mutex_t queryMutex; +} SRetrieveSupport; + +int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc, + tColModel **pFinalModel, uint32_t nBufferSize); + +void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, tColModel *pFinalModel, + int32_t numOfVnodes); + +int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data, + int32_t numOfRows, int32_t orderType); + +int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, int32_t orderType); + +/* + * create local reducer to launch the second-stage reduce process at client site + */ +void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, + tColModel *finalModel, SSqlCmd *pSqlCmd, SSqlRes *pRes); + +void tscDestroyLocalReducer(SSqlObj *pSql); + +int32_t tscLocalDoReduce(SSqlObj *pSql); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCSECONARYMERGE_H diff --git a/src/client/inc/tscSyntaxtreefunction.h b/src/client/inc/tscSyntaxtreefunction.h new file mode 100644 index 000000000000..6f91d2f7eded --- /dev/null +++ b/src/client/inc/tscSyntaxtreefunction.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TSYNTAXTREEFUNCTION_H +#define TDENGINE_TSYNTAXTREEFUNCTION_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*_bi_consumer_fn_t)(void *left, void *right, int32_t numOfLeft, int32_t numOfRight, void *output, + int32_t order); + +_bi_consumer_fn_t tGetBiConsumerFn(int32_t leftType, int32_t rightType, int32_t optr); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSYNTAXTREEFUNCTION_H diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h new file mode 100644 index 000000000000..8055ba2f9208 --- /dev/null +++ b/src/client/inc/tscUtil.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSCUTIL_H +#define TDENGINE_TSCUTIL_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * @date 2018/09/30 + */ +#include +#include "tsdb.h" +#include "tsclient.h" +#include "textbuffer.h" + +#define UTIL_METER_IS_METRIC(cmd) (((cmd)->pMeterMeta != NULL) && ((cmd)->pMeterMeta->meterType == TSDB_METER_METRIC)) + +#define UTIL_METER_IS_NOMRAL_METER(cmd) (!(UTIL_METER_IS_METRIC(cmd))) + +#define UTIL_METER_IS_CREATE_FROM_METRIC(cmd) \ + (((cmd)->pMeterMeta != NULL) && ((cmd)->pMeterMeta->meterType == TSDB_METER_MTABLE)) + +typedef struct SParsedColElem { + int16_t colIndex; + int16_t offset; +} SParsedColElem; + +typedef struct SParsedDataColInfo { + int32_t numOfCols; + int32_t numOfParsedCols; + SParsedColElem elems[TSDB_MAX_COLUMNS]; + bool hasVal[TSDB_MAX_COLUMNS]; +} SParsedDataColInfo; + +SInsertedDataBlocks* tscCreateDataBlock(int32_t size); +void tscDestroyDataBlock(SInsertedDataBlocks** pDataBlock); + +SDataBlockList* tscCreateBlockArrayList(); +void tscDestroyBlockArrayList(SDataBlockList** pList); +int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, SInsertedDataBlocks* pDataBlock); +void tscFreeUnusedDataBlocks(SDataBlockList* pList); + +SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); +SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); + +bool tscProjectionQueryOnMetric(SSqlObj* pSql); +bool tscIsTwoStageMergeMetricQuery(SSqlObj* pSql); + +/** + * + * for the projection query on metric or point interpolation query on metric, + * we iterate all the meters, instead of invoke query on all qualified meters + * simultaneously. 
+ * + * @param pSql sql object + * @return + * + */ +bool tscIsFirstProjQueryOnMetric(SSqlObj* pSql); +bool tscIsPointInterpQuery(SSqlCmd* pCmd); +void tscClearInterpInfo(SSqlCmd* pCmd); + +int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName); + +bool tscIsInsertOrImportData(char* sqlstr); + +/* use for keep current db info temporarily, for handle table with db prefix */ +void tscGetDBInfoFromMeterId(char* meterId, char* db); + +void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr); +bool tscQueryOnMetric(SSqlCmd* pCmd); + +int tscAllocPayloadWithSize(SSqlCmd* pCmd, int size); + +void tscFieldInfoSetValFromSchema(SFieldInfo* pFieldInfo, int32_t index, SSchema* pSchema); +void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* pField); +void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, char* name, int16_t bytes); + +void tscFieldInfoCalOffset(SSqlCmd* pCmd); +void tscFieldInfoRenewOffsetForInterResult(SSqlCmd* pCmd); +void tscFieldInfoClone(SFieldInfo* src, SFieldInfo* dst); + +TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index); +int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index); +int32_t tscGetResRowLength(SSqlCmd* pCmd); +void tscClearFieldInfo(SSqlCmd* pCmd); + +void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes); + +SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, + int16_t size); +SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, + int16_t size); + +SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index); +void tscSqlExprClone(SSqlExprInfo* src, SSqlExprInfo* dst); + +SColumnBase* tscColumnInfoInsert(SSqlCmd* pCmd, int32_t colIndex); + +void tscColumnInfoClone(SColumnsInfo* src, SColumnsInfo* dst); +SColumnBase* tscColumnInfoGet(SSqlCmd* pCmd, int32_t index); +void tscColumnInfoReserve(SSqlCmd* pCmd, int32_t size); + +int32_t tscValidateName(SSQLToken* pToken); + +void tscIncStreamExecutionCount(void* pStream); + +bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId); + +// get starter position of metric query condition (query on tags) in +// SSqlCmd.payload +char* tsGetMetricQueryCondPos(STagCond* pCond); +void tscTagCondAssign(STagCond* pDst, STagCond* pSrc); +void tscTagCondRelease(STagCond* pCond); +void tscTagCondSetQueryCondType(STagCond* pCond, int16_t type); + +void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd); + +void tscSetFreeHeatBeat(STscObj* pObj); +bool tscShouldFreeHeatBeat(SSqlObj* pHb); +void tscCleanSqlCmd(SSqlCmd* pCmd); +bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql); +void tscDoQuery(SSqlObj* pSql); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCUTIL_H diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h new file mode 100644 index 000000000000..99dab9236271 --- /dev/null +++ b/src/client/inc/tsclient.h @@ -0,0 +1,449 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. 
If not, see . + */ + +#ifndef TDENGINE_TSCLIENT_H +#define TDENGINE_TSCLIENT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taos.h" +#include "taosmsg.h" +#include "tglobalcfg.h" +#include "tlog.h" +#include "tscCache.h" +#include "tsdb.h" +#include "tsql.h" +#include "tsqlfunction.h" +#include "tutil.h" + +#define TSC_GET_RESPTR_BASE(res, cmd, col, ord) \ + ((res->data + tscFieldInfoGetOffset(cmd, col) * res->numOfRows) + \ + (1 - ord.order) * (res->numOfRows - 1) * tscFieldInfoGetField(cmd, col)->bytes) + +enum _sql_cmd { + TSDB_SQL_SELECT, + TSDB_SQL_FETCH, + TSDB_SQL_INSERT, + + TSDB_SQL_MGMT, // the SQL below is for mgmt node + TSDB_SQL_CREATE_DB, + TSDB_SQL_CREATE_TABLE, + TSDB_SQL_DROP_DB, + TSDB_SQL_DROP_TABLE, + TSDB_SQL_CREATE_ACCT, + TSDB_SQL_CREATE_USER, + TSDB_SQL_DROP_ACCT, // 10 + TSDB_SQL_DROP_USER, + TSDB_SQL_ALTER_USER, + TSDB_SQL_ALTER_ACCT, + TSDB_SQL_ALTER_TABLE, + TSDB_SQL_ALTER_DB, + TSDB_SQL_CREATE_MNODE, + TSDB_SQL_DROP_MNODE, + TSDB_SQL_CREATE_PNODE, + TSDB_SQL_DROP_PNODE, + TSDB_SQL_CFG_PNODE, // 20 + TSDB_SQL_CFG_MNODE, + TSDB_SQL_SHOW, + TSDB_SQL_RETRIEVE, + TSDB_SQL_KILL_QUERY, + TSDB_SQL_KILL_STREAM, + TSDB_SQL_KILL_CONNECTION, + + TSDB_SQL_READ, // SQL below is for read operation + TSDB_SQL_CONNECT, + TSDB_SQL_USE_DB, + TSDB_SQL_META, // 30 + TSDB_SQL_METRIC, + TSDB_SQL_HB, + + TSDB_SQL_LOCAL, // SQL below for client local + TSDB_SQL_DESCRIBE_TABLE, + TSDB_SQL_RETRIEVE_METRIC, + TSDB_SQL_RETRIEVE_TAGS, + TSDB_SQL_RETRIEVE_EMPTY_RESULT, // build empty result instead of accessing + // dnode to fetch result + TSDB_SQL_RESET_CACHE, // reset the client cache + TSDB_SQL_CFG_LOCAL, + + TSDB_SQL_MAX +}; + +// forward declaration +struct SSqlInfo; + +typedef struct SSqlGroupbyExpr { + int16_t numOfGroupbyCols; + int16_t tagIndex[TSDB_MAX_TAGS]; /* group by columns information */ + int16_t orderIdx; /* order by column index */ + int16_t orderType; /* order by type: asc/desc */ +} SSqlGroupbyExpr; + +/* the structure for sql function in select clause */ +typedef struct SSqlExpr { + char aliasName[TSDB_COL_NAME_LEN + 1]; // as aliasName + + SColIndex colInfo; + int16_t sqlFuncId; // function id in aAgg array + + int16_t resType; // return value type + int16_t resBytes; // length of return value + + int16_t numOfParams; // argument value of each function + tVariant param[3]; // parameters are not more than 3 +} SSqlExpr; + +typedef struct SFieldInfo { + int16_t numOfOutputCols; // number of column in result + int16_t numOfAlloc; // allocated size + TAOS_FIELD *pFields; + short * pOffset; +} SFieldInfo; + +typedef struct SSqlExprInfo { + int16_t numOfAlloc; + int16_t numOfExprs; + SSqlExpr *pExprs; +} SSqlExprInfo; + +typedef struct SColumnBase { + int16_t colIndex; + + /* todo refactor: the following data is belong to one struct */ + int16_t filterOn; /* denote if the filter is active */ + int16_t lowerRelOptr; + int16_t upperRelOptr; + int16_t filterOnBinary; /* denote if current column is binary */ + + union { + struct { + int64_t lowerBndi; + int64_t upperBndi; + }; + struct { + double lowerBndd; + double upperBndd; + }; + struct { + int64_t pz; + int64_t len; + }; + }; +} SColumnBase; + +typedef struct SColumnsInfo { + int16_t numOfAlloc; + int16_t numOfCols; + SColumnBase *pColList; +} SColumnsInfo; + +struct SLocalReducer; + +typedef struct STagCond { + int32_t len; + int32_t allocSize; + int16_t type; + char * pData; +} STagCond; + +typedef struct 
SInsertedDataBlocks { + char meterId[TSDB_METER_ID_LEN]; + int64_t size; + uint32_t nAllocSize; + uint32_t numOfMeters; + union { + char *filename; + char *pData; + }; +} SInsertedDataBlocks; + +typedef struct SDataBlockList { + int32_t idx; + int32_t nSize; + int32_t nAlloc; + char * userParam; /* user assigned parameters for async query */ + void * udfp; /* user defined function pointer, used in async model */ + SInsertedDataBlocks **pData; +} SDataBlockList; + +typedef struct { + char name[TSDB_METER_ID_LEN]; + SOrderVal order; + int command; + int count; + int16_t isInsertFromFile; // load data from file or not + int16_t metricQuery; // metric query or not + bool existsCheck; + char msgType; + char type; + char intervalTimeUnit; + int64_t etime; + int64_t stime; + int64_t nAggTimeInterval; // aggregation time interval + int64_t nSlidingTime; // sliding window in mseconds + SSqlGroupbyExpr groupbyExpr; // group by tags info + + /* + * use to keep short request msg and error msg, in such case, SSqlCmd->payload == SSqlCmd->ext; + * create table/query/insert operations will exceed the TSDB_SQLCMD_SIZE. + * + * In such cases, allocate the memory dynamically, and need to free the memory + */ + uint32_t allocSize; + char * payload; + int payloadLen; + short numOfCols; + SColumnsInfo colList; + SFieldInfo fieldsInfo; + SSqlExprInfo exprsInfo; + int16_t numOfReqTags; // total required tags in query, inlcuding groupby clause + tag projection + int16_t tagColumnIndex[TSDB_MAX_TAGS + 1]; + SLimitVal limit; + int64_t globalLimit; + SLimitVal glimit; + STagCond tagCond; + int16_t vnodeIdx; // vnode index in pMetricMeta for metric query + int16_t interpoType; // interpolate type + + SDataBlockList *pDataBlocks; // submit data blocks branched according to vnode + SMeterMeta * pMeterMeta; // metermeta + SMetricMeta * pMetricMeta; // metricmeta + + // todo use dynamic allocated memory for defaultVal + int64_t defaultVal[TSDB_MAX_COLUMNS]; // default value for interpolation +} SSqlCmd; + +typedef struct SResRec { + int numOfRows; + int numOfTotal; +} SResRec; + +typedef struct { + uint8_t code; + int numOfRows; // num of results in current retrieved + int numOfTotal; // num of total results + char * pRsp; + int rspType; + int rspLen; + uint64_t qhandle; + int64_t useconds; + int64_t offset; // offset value from vnode during projection query of stable + int row; + int16_t numOfnchar; + int16_t precision; + int32_t numOfGroups; + SResRec *pGroupRec; + char * data; + short * bytes; + void ** tsrow; + + // Buffer used to put multibytes encoded using unicode (wchar_t) + char ** buffer; + struct SLocalReducer *pLocalReducer; +} SSqlRes; + +typedef struct _tsc_obj { + void * signature; + void * pTimer; + char mgmtIp[TSDB_USER_LEN]; + short mgmtPort; + char user[TSDB_USER_LEN]; + char pass[TSDB_KEY_LEN]; + char acctId[TSDB_DB_NAME_LEN]; + char db[TSDB_DB_NAME_LEN]; + char sversion[TSDB_VERSION_LEN]; + char writeAuth : 1; + char superAuth : 1; + struct _sql_obj *pSql; + struct _sql_obj *pHb; + struct _sql_obj *sqlList; + struct _sstream *streamList; + pthread_mutex_t mutex; +} STscObj; + +typedef struct _sql_obj { + void * signature; + STscObj *pTscObj; + void (*fp)(); + void (*fetchFp)(); + void * param; + uint32_t ip; + short vnode; + int64_t stime; + uint32_t queryId; + void * thandle; + void * pStream; + char * sqlstr; + char retry; + char maxRetry; + char index; + char freed : 4; + char listed : 4; + sem_t rspSem; + sem_t emptyRspSem; + + SSqlCmd cmd; + SSqlRes res; + + char numOfSubs; + struct _sql_obj 
**pSubs; + struct _sql_obj * prev, *next; +} SSqlObj; + +typedef struct _sstream { + SSqlObj *pSql; + uint32_t streamId; + char listed; + int64_t num; // number of computing count + + /* + * bookmark the current number of result in computing, + * the value will be set to 0 before set timer for next computing + */ + int64_t numOfRes; + + int64_t useconds; // total elapsed time + int64_t ctime; // stream created time + int64_t stime; // stream next executed time + int64_t etime; // stream end query time, when time is larger then etime, the + // stream will be closed + int64_t interval; + int64_t slidingTime; + void * pTimer; + + void (*fp)(); + void *param; + + // Call backfunction when stream is stopped from client level + void (*callback)(void *); + struct _sstream *prev, *next; +} SSqlStream; + +typedef struct { + char numOfIps; + uint32_t ip[TSDB_MAX_MGMT_IPS]; + char ipstr[TSDB_MAX_MGMT_IPS][TSDB_IPv4ADDR_LEN]; +} SIpStrList; + +// tscSql API +int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion); + +void tscInitMsgs(); +void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle); +int tscProcessSql(SSqlObj *pSql); +int tscGetMeterMeta(SSqlObj *pSql, char *meterId); +int tscGetMeterMetaEx(SSqlObj *pSql, char *meterId, bool createIfNotExists); + +void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows); + +int tscRenewMeterMeta(SSqlObj *pSql, char *meterId); +void tscQueueAsyncRes(SSqlObj *pSql); +int tscGetMetricMeta(SSqlObj *pSql, char *meterId); +void tscQueueAsyncError(void(*fp), void *param); + +int tscProcessLocalCmd(SSqlObj *pSql); +int tscCfgDynamicOptions(char *msg); +int taos_retrieve(TAOS_RES *res); + +/* + * transfer function for metric query in stream computing, the function need to be change + * before send query message to vnode + */ +void tscTansformSQLFunctionForMetricQuery(SSqlCmd *pCmd); +void tscRestoreSQLFunctionForMetricQuery(SSqlCmd *pCmd); + +/** + * release both metric/meter meta information + * @param pCmd SSqlCmd object that contains the metric/meter meta info + */ +void tscClearSqlMetaInfo(SSqlCmd *pCmd); + +void tscClearSqlMetaInfoForce(SSqlCmd *pCmd); + +int32_t tscCreateResPointerInfo(SSqlCmd *pCmd, SSqlRes *pRes); +void tscDestroyResPointerInfo(SSqlRes *pRes); + +void tscfreeSqlCmdData(SSqlCmd *pCmd); + +/** + * only free part of resources allocated during query. + * Note: this function is multi-thread safe. + * @param pObj + */ +void tscFreeSqlObjPartial(SSqlObj *pObj); + +/** + * free sql object, release allocated resource + * @param pObj Free metric/meta information, dynamically allocated payload, and + * response buffer, object itself + */ +void tscFreeSqlObj(SSqlObj *pObj); + +void tscCloseTscObj(STscObj *pObj); + +// +// support functions for async metric query. +// we declare them as global visible functions, because we need them to check if a +// failed async query in tscMeterMetaCallBack is a metric query or not. 
+ +// expr: (fp == tscRetrieveDataRes or fp == tscRetrieveFromVnodeCallBack) +// If a query is async query, we simply abort current query process, instead of continuing +// +void tscRetrieveDataRes(void *param, TAOS_RES *tres, int numOfRows); +void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); + +void tscProcessMultiVnodesInsert(SSqlObj *pSql); +void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql); +void tscKillMetricQuery(SSqlObj *pSql); +void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); +int32_t tscBuildResultsForEmptyRetrieval(SSqlObj *pSql); + +// transfer SSqlInfo to SqlCmd struct +int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); + +void tscQueueAsyncFreeResult(SSqlObj *pSql); + +extern void * pVnodeConn; +extern void * pTscMgmtConn; +extern void * tscCacheHandle; +extern uint8_t globalCode; +extern int slaveIndex; +extern void * tscTmr; +extern void * tscConnCache; +extern void * tscQhandle; +extern int tscKeepConn[]; +extern int tsInsertHeadSize; +extern int tscNumOfThreads; +extern char tsServerIpStr[128]; +extern uint32_t tsServerIp; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h new file mode 100755 index 000000000000..9f9632cadc3b --- /dev/null +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -0,0 +1,151 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class com_taosdata_jdbc_TSDBJNIConnector */ + +#ifndef _Included_com_taosdata_jdbc_TSDBJNIConnector +#define _Included_com_taosdata_jdbc_TSDBJNIConnector +#ifdef __cplusplus +extern "C" { +#endif +#undef com_taosdata_jdbc_TSDBJNIConnector_INVALID_CONNECTION_POINTER_VALUE +#define com_taosdata_jdbc_TSDBJNIConnector_INVALID_CONNECTION_POINTER_VALUE 0LL +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: initImp + * Signature: (Ljava/lang/String;)V + */ +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp + (JNIEnv *, jclass, jstring); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: setOptions + * Signature: (ILjava/lang/String;)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions + (JNIEnv *, jclass, jint, jstring); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: getTsCharset + * Signature: ()Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset + (JNIEnv *, jclass); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: connectImp + * Signature: (Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)J + */ +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp + (JNIEnv *, jobject, jstring, jint, jstring, jstring, jstring); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: executeQueryImp + * Signature: ([BJ)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp + (JNIEnv *, jobject, jbyteArray, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: getErrCodeImp + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp + (JNIEnv *, jobject, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: getErrMsgImp + * Signature: (J)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp + (JNIEnv *, jobject, jlong); + +/* + * Class: 
com_taosdata_jdbc_TSDBJNIConnector + * Method: getResultSetImp + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp + (JNIEnv *, jobject, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: freeResultSetImp + * Signature: (JJ)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp + (JNIEnv *, jobject, jlong, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: getAffectedRowsImp + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp + (JNIEnv *, jobject, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: getSchemaMetaDataImp + * Signature: (JJLjava/util/List;)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaDataImp + (JNIEnv *, jobject, jlong, jlong, jobject); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: fetchRowImp + * Signature: (JJLcom/taosdata/jdbc/TSDBResultSetRowData;)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp + (JNIEnv *, jobject, jlong, jlong, jobject); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: closeConnectionImp + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionImp + (JNIEnv *, jobject, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: subscribeImp + * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;JI)J + */ +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp + (JNIEnv *, jobject, jstring, jstring, jstring, jstring, jstring, jlong, jint); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: consumeImp + * Signature: (J)Lcom/taosdata/jdbc/TSDBResultSetRowData; + */ +JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp + (JNIEnv *, jobject, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: unsubscribeImp + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp + (JNIEnv *, jobject, jlong); + +/* + * Class: com_taosdata_jdbc_TSDBJNIConnector + * Method: validateCreateTableSqlImp + * Signature: (J[B)I + */ +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp + (JNIEnv *, jobject, jlong, jbyteArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/src/client/jni/jni/AWTCocoaComponent.h b/src/client/jni/jni/AWTCocoaComponent.h new file mode 100755 index 000000000000..0373324d2cc9 --- /dev/null +++ b/src/client/jni/jni/AWTCocoaComponent.h @@ -0,0 +1,15 @@ +// +// AWTCocoaComponent.h +// +// Copyright (c) 2003 Apple Computer Inc. All rights reserved. +// + +#import +#import + +// This is implemented by a com.apple.eawt.CocoaComponent. It receives messages +// from java safely on the AppKit thread. See the com.apple.eawt.CocoaComponent +// java documentation for more information. +@protocol AWTCocoaComponent +-(void)awtMessage:(jint)messageID message:(jobject)message env:(JNIEnv*)env DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +@end diff --git a/src/client/jni/jni/JDWP.h b/src/client/jni/jni/JDWP.h new file mode 100755 index 000000000000..1e84aaad490d --- /dev/null +++ b/src/client/jni/jni/JDWP.h @@ -0,0 +1,53 @@ +/* + * @(#)JDWP.h 1.33 05/11/17 + * + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. 
+ */ + +#ifndef JDWP_JDWP_H +#define JDWP_JDWP_H + +#include "JDWPCommands.h" + +/* + * JDWPCommands.h is the javah'ed version of all the constants defined + * com.sun.tools.jdi.JDWP and all its nested classes. Since the names are + * very long, the macros below are provided for convenience. + */ + +#define JDWP_COMMAND_SET(name) JDWP_ ## name +#define JDWP_COMMAND(set, name) JDWP_ ## set ## _ ## name +#define JDWP_REQUEST_MODIFIER(name) \ + JDWP_EventRequest_Set_Out_modifiers_Modifier_ ## name +#define JDWP_EVENT(name) \ + JDWP_EventKind_ ## name +#define JDWP_THREAD_STATUS(name) \ + JDWP_ThreadStatus_ ## name +#define JDWP_SUSPEND_STATUS(name) \ + JDWP_SuspendStatus_SUSPEND_STATUS_ ## name +#define JDWP_CLASS_STATUS(name) \ + JDWP_ClassStatus_ ## name +#define JDWP_TYPE_TAG(name) \ + JDWP_TypeTag_ ## name +#define JDWP_TAG(name) \ + JDWP_Tag_ ## name +#define JDWP_STEP_DEPTH(name) \ + JDWP_StepDepth_ ## name +#define JDWP_STEP_SIZE(name) \ + JDWP_StepSize_ ## name +#define JDWP_SUSPEND_POLICY(name) \ + JDWP_SuspendPolicy_ ## name +#define JDWP_INVOKE_OPTIONS(name) \ + JDWP_InvokeOptions_INVOKE_ ## name +#define JDWP_ERROR(name) \ + JDWP_Error_ ## name +#define JDWP_HIGHEST_COMMAND_SET 17 +#define JDWP_REQUEST_NONE -1 + +/* This typedef helps keep the event and error types straight. */ +typedef unsigned short jdwpError; +typedef unsigned char jdwpEvent; +typedef jint jdwpThreadStatus; + +#endif diff --git a/src/client/jni/jni/JDWPCommands.h b/src/client/jni/jni/JDWPCommands.h new file mode 100755 index 000000000000..fc898767ff9b --- /dev/null +++ b/src/client/jni/jni/JDWPCommands.h @@ -0,0 +1,257 @@ +#define JDWP_VirtualMachine 1 +#define JDWP_VirtualMachine_Version 1 +#define JDWP_VirtualMachine_ClassesBySignature 2 +#define JDWP_VirtualMachine_AllClasses 3 +#define JDWP_VirtualMachine_AllThreads 4 +#define JDWP_VirtualMachine_TopLevelThreadGroups 5 +#define JDWP_VirtualMachine_Dispose 6 +#define JDWP_VirtualMachine_IDSizes 7 +#define JDWP_VirtualMachine_Suspend 8 +#define JDWP_VirtualMachine_Resume 9 +#define JDWP_VirtualMachine_Exit 10 +#define JDWP_VirtualMachine_CreateString 11 +#define JDWP_VirtualMachine_Capabilities 12 +#define JDWP_VirtualMachine_ClassPaths 13 +#define JDWP_VirtualMachine_DisposeObjects 14 +#define JDWP_VirtualMachine_HoldEvents 15 +#define JDWP_VirtualMachine_ReleaseEvents 16 +#define JDWP_VirtualMachine_CapabilitiesNew 17 +#define JDWP_VirtualMachine_RedefineClasses 18 +#define JDWP_VirtualMachine_SetDefaultStratum 19 +#define JDWP_VirtualMachine_AllClassesWithGeneric 20 +#define JDWP_VirtualMachine_InstanceCounts 21 +#define JDWP_ReferenceType 2 +#define JDWP_ReferenceType_Signature 1 +#define JDWP_ReferenceType_ClassLoader 2 +#define JDWP_ReferenceType_Modifiers 3 +#define JDWP_ReferenceType_Fields 4 +#define JDWP_ReferenceType_Methods 5 +#define JDWP_ReferenceType_GetValues 6 +#define JDWP_ReferenceType_SourceFile 7 +#define JDWP_ReferenceType_NestedTypes 8 +#define JDWP_ReferenceType_Status 9 +#define JDWP_ReferenceType_Interfaces 10 +#define JDWP_ReferenceType_ClassObject 11 +#define JDWP_ReferenceType_SourceDebugExtension 12 +#define JDWP_ReferenceType_SignatureWithGeneric 13 +#define JDWP_ReferenceType_FieldsWithGeneric 14 +#define JDWP_ReferenceType_MethodsWithGeneric 15 +#define JDWP_ReferenceType_Instances 16 +#define JDWP_ReferenceType_ClassFileVersion 17 +#define JDWP_ReferenceType_ConstantPool 18 +#define JDWP_ClassType 3 +#define JDWP_ClassType_Superclass 1 +#define JDWP_ClassType_SetValues 2 +#define JDWP_ClassType_InvokeMethod 3 +#define 
JDWP_ClassType_NewInstance 4 +#define JDWP_ArrayType 4 +#define JDWP_ArrayType_NewInstance 1 +#define JDWP_InterfaceType 5 +#define JDWP_Method 6 +#define JDWP_Method_LineTable 1 +#define JDWP_Method_VariableTable 2 +#define JDWP_Method_Bytecodes 3 +#define JDWP_Method_IsObsolete 4 +#define JDWP_Method_VariableTableWithGeneric 5 +#define JDWP_Field 8 +#define JDWP_ObjectReference 9 +#define JDWP_ObjectReference_ReferenceType 1 +#define JDWP_ObjectReference_GetValues 2 +#define JDWP_ObjectReference_SetValues 3 +#define JDWP_ObjectReference_MonitorInfo 5 +#define JDWP_ObjectReference_InvokeMethod 6 +#define JDWP_ObjectReference_DisableCollection 7 +#define JDWP_ObjectReference_EnableCollection 8 +#define JDWP_ObjectReference_IsCollected 9 +#define JDWP_ObjectReference_ReferringObjects 10 +#define JDWP_StringReference 10 +#define JDWP_StringReference_Value 1 +#define JDWP_ThreadReference 11 +#define JDWP_ThreadReference_Name 1 +#define JDWP_ThreadReference_Suspend 2 +#define JDWP_ThreadReference_Resume 3 +#define JDWP_ThreadReference_Status 4 +#define JDWP_ThreadReference_ThreadGroup 5 +#define JDWP_ThreadReference_Frames 6 +#define JDWP_ThreadReference_FrameCount 7 +#define JDWP_ThreadReference_OwnedMonitors 8 +#define JDWP_ThreadReference_CurrentContendedMonitor 9 +#define JDWP_ThreadReference_Stop 10 +#define JDWP_ThreadReference_Interrupt 11 +#define JDWP_ThreadReference_SuspendCount 12 +#define JDWP_ThreadReference_OwnedMonitorsStackDepthInfo 13 +#define JDWP_ThreadReference_ForceEarlyReturn 14 +#define JDWP_ThreadGroupReference 12 +#define JDWP_ThreadGroupReference_Name 1 +#define JDWP_ThreadGroupReference_Parent 2 +#define JDWP_ThreadGroupReference_Children 3 +#define JDWP_ArrayReference 13 +#define JDWP_ArrayReference_Length 1 +#define JDWP_ArrayReference_GetValues 2 +#define JDWP_ArrayReference_SetValues 3 +#define JDWP_ClassLoaderReference 14 +#define JDWP_ClassLoaderReference_VisibleClasses 1 +#define JDWP_EventRequest 15 +#define JDWP_EventRequest_Set 1 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_Count 1 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_Conditional 2 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_ThreadOnly 3 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_ClassOnly 4 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_ClassMatch 5 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_ClassExclude 6 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_LocationOnly 7 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_ExceptionOnly 8 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_FieldOnly 9 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_Step 10 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_InstanceOnly 11 +#define JDWP_EventRequest_Set_Out_modifiers_Modifier_SourceNameMatch 12 +#define JDWP_EventRequest_Clear 2 +#define JDWP_EventRequest_ClearAllBreakpoints 3 +#define JDWP_StackFrame 16 +#define JDWP_StackFrame_GetValues 1 +#define JDWP_StackFrame_SetValues 2 +#define JDWP_StackFrame_ThisObject 3 +#define JDWP_StackFrame_PopFrames 4 +#define JDWP_ClassObjectReference 17 +#define JDWP_ClassObjectReference_ReflectedType 1 +#define JDWP_Event 64 +#define JDWP_Event_Composite 100 +#define JDWP_Event_Composite_Event_events_Events_VMStart JDWP.EventKind.VM_START +#define JDWP_Event_Composite_Event_events_Events_SingleStep JDWP.EventKind.SINGLE_STEP +#define JDWP_Event_Composite_Event_events_Events_Breakpoint JDWP.EventKind.BREAKPOINT +#define JDWP_Event_Composite_Event_events_Events_MethodEntry 
JDWP.EventKind.METHOD_ENTRY +#define JDWP_Event_Composite_Event_events_Events_MethodExit JDWP.EventKind.METHOD_EXIT +#define JDWP_Event_Composite_Event_events_Events_MethodExitWithReturnValue JDWP.EventKind.METHOD_EXIT_WITH_RETURN_VALUE +#define JDWP_Event_Composite_Event_events_Events_MonitorContendedEnter JDWP.EventKind.MONITOR_CONTENDED_ENTER +#define JDWP_Event_Composite_Event_events_Events_MonitorContendedEntered JDWP.EventKind.MONITOR_CONTENDED_ENTERED +#define JDWP_Event_Composite_Event_events_Events_MonitorWait JDWP.EventKind.MONITOR_WAIT +#define JDWP_Event_Composite_Event_events_Events_MonitorWaited JDWP.EventKind.MONITOR_WAITED +#define JDWP_Event_Composite_Event_events_Events_Exception JDWP.EventKind.EXCEPTION +#define JDWP_Event_Composite_Event_events_Events_ThreadStart JDWP.EventKind.THREAD_START +#define JDWP_Event_Composite_Event_events_Events_ThreadDeath JDWP.EventKind.THREAD_DEATH +#define JDWP_Event_Composite_Event_events_Events_ClassPrepare JDWP.EventKind.CLASS_PREPARE +#define JDWP_Event_Composite_Event_events_Events_ClassUnload JDWP.EventKind.CLASS_UNLOAD +#define JDWP_Event_Composite_Event_events_Events_FieldAccess JDWP.EventKind.FIELD_ACCESS +#define JDWP_Event_Composite_Event_events_Events_FieldModification JDWP.EventKind.FIELD_MODIFICATION +#define JDWP_Event_Composite_Event_events_Events_VMDeath JDWP.EventKind.VM_DEATH +#define JDWP_Error_NONE 0 +#define JDWP_Error_INVALID_THREAD 10 +#define JDWP_Error_INVALID_THREAD_GROUP 11 +#define JDWP_Error_INVALID_PRIORITY 12 +#define JDWP_Error_THREAD_NOT_SUSPENDED 13 +#define JDWP_Error_THREAD_SUSPENDED 14 +#define JDWP_Error_THREAD_NOT_ALIVE 15 +#define JDWP_Error_INVALID_OBJECT 20 +#define JDWP_Error_INVALID_CLASS 21 +#define JDWP_Error_CLASS_NOT_PREPARED 22 +#define JDWP_Error_INVALID_METHODID 23 +#define JDWP_Error_INVALID_LOCATION 24 +#define JDWP_Error_INVALID_FIELDID 25 +#define JDWP_Error_INVALID_FRAMEID 30 +#define JDWP_Error_NO_MORE_FRAMES 31 +#define JDWP_Error_OPAQUE_FRAME 32 +#define JDWP_Error_NOT_CURRENT_FRAME 33 +#define JDWP_Error_TYPE_MISMATCH 34 +#define JDWP_Error_INVALID_SLOT 35 +#define JDWP_Error_DUPLICATE 40 +#define JDWP_Error_NOT_FOUND 41 +#define JDWP_Error_INVALID_MONITOR 50 +#define JDWP_Error_NOT_MONITOR_OWNER 51 +#define JDWP_Error_INTERRUPT 52 +#define JDWP_Error_INVALID_CLASS_FORMAT 60 +#define JDWP_Error_CIRCULAR_CLASS_DEFINITION 61 +#define JDWP_Error_FAILS_VERIFICATION 62 +#define JDWP_Error_ADD_METHOD_NOT_IMPLEMENTED 63 +#define JDWP_Error_SCHEMA_CHANGE_NOT_IMPLEMENTED 64 +#define JDWP_Error_INVALID_TYPESTATE 65 +#define JDWP_Error_HIERARCHY_CHANGE_NOT_IMPLEMENTED 66 +#define JDWP_Error_DELETE_METHOD_NOT_IMPLEMENTED 67 +#define JDWP_Error_UNSUPPORTED_VERSION 68 +#define JDWP_Error_NAMES_DONT_MATCH 69 +#define JDWP_Error_CLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED 70 +#define JDWP_Error_METHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED 71 +#define JDWP_Error_NOT_IMPLEMENTED 99 +#define JDWP_Error_NULL_POINTER 100 +#define JDWP_Error_ABSENT_INFORMATION 101 +#define JDWP_Error_INVALID_EVENT_TYPE 102 +#define JDWP_Error_ILLEGAL_ARGUMENT 103 +#define JDWP_Error_OUT_OF_MEMORY 110 +#define JDWP_Error_ACCESS_DENIED 111 +#define JDWP_Error_VM_DEAD 112 +#define JDWP_Error_INTERNAL 113 +#define JDWP_Error_UNATTACHED_THREAD 115 +#define JDWP_Error_INVALID_TAG 500 +#define JDWP_Error_ALREADY_INVOKING 502 +#define JDWP_Error_INVALID_INDEX 503 +#define JDWP_Error_INVALID_LENGTH 504 +#define JDWP_Error_INVALID_STRING 506 +#define JDWP_Error_INVALID_CLASS_LOADER 507 +#define JDWP_Error_INVALID_ARRAY 508 +#define 
JDWP_Error_TRANSPORT_LOAD 509 +#define JDWP_Error_TRANSPORT_INIT 510 +#define JDWP_Error_NATIVE_METHOD 511 +#define JDWP_Error_INVALID_COUNT 512 +#define JDWP_EventKind_SINGLE_STEP 1 +#define JDWP_EventKind_BREAKPOINT 2 +#define JDWP_EventKind_FRAME_POP 3 +#define JDWP_EventKind_EXCEPTION 4 +#define JDWP_EventKind_USER_DEFINED 5 +#define JDWP_EventKind_THREAD_START 6 +#define JDWP_EventKind_THREAD_DEATH 7 +#define JDWP_EventKind_THREAD_END 7 +#define JDWP_EventKind_CLASS_PREPARE 8 +#define JDWP_EventKind_CLASS_UNLOAD 9 +#define JDWP_EventKind_CLASS_LOAD 10 +#define JDWP_EventKind_FIELD_ACCESS 20 +#define JDWP_EventKind_FIELD_MODIFICATION 21 +#define JDWP_EventKind_EXCEPTION_CATCH 30 +#define JDWP_EventKind_METHOD_ENTRY 40 +#define JDWP_EventKind_METHOD_EXIT 41 +#define JDWP_EventKind_METHOD_EXIT_WITH_RETURN_VALUE 42 +#define JDWP_EventKind_MONITOR_CONTENDED_ENTER 43 +#define JDWP_EventKind_MONITOR_CONTENDED_ENTERED 44 +#define JDWP_EventKind_MONITOR_WAIT 45 +#define JDWP_EventKind_MONITOR_WAITED 46 +#define JDWP_EventKind_VM_START 90 +#define JDWP_EventKind_VM_INIT 90 +#define JDWP_EventKind_VM_DEATH 99 +#define JDWP_EventKind_VM_DISCONNECTED 100 +#define JDWP_ThreadStatus_ZOMBIE 0 +#define JDWP_ThreadStatus_RUNNING 1 +#define JDWP_ThreadStatus_SLEEPING 2 +#define JDWP_ThreadStatus_MONITOR 3 +#define JDWP_ThreadStatus_WAIT 4 +#define JDWP_SuspendStatus_SUSPEND_STATUS_SUSPENDED 0x1 +#define JDWP_ClassStatus_VERIFIED 1 +#define JDWP_ClassStatus_PREPARED 2 +#define JDWP_ClassStatus_INITIALIZED 4 +#define JDWP_ClassStatus_ERROR 8 +#define JDWP_TypeTag_CLASS 1 +#define JDWP_TypeTag_INTERFACE 2 +#define JDWP_TypeTag_ARRAY 3 +#define JDWP_Tag_ARRAY 91 +#define JDWP_Tag_BYTE 66 +#define JDWP_Tag_CHAR 67 +#define JDWP_Tag_OBJECT 76 +#define JDWP_Tag_FLOAT 70 +#define JDWP_Tag_DOUBLE 68 +#define JDWP_Tag_INT 73 +#define JDWP_Tag_LONG 74 +#define JDWP_Tag_SHORT 83 +#define JDWP_Tag_VOID 86 +#define JDWP_Tag_BOOLEAN 90 +#define JDWP_Tag_STRING 115 +#define JDWP_Tag_THREAD 116 +#define JDWP_Tag_THREAD_GROUP 103 +#define JDWP_Tag_CLASS_LOADER 108 +#define JDWP_Tag_CLASS_OBJECT 99 +#define JDWP_StepDepth_INTO 0 +#define JDWP_StepDepth_OVER 1 +#define JDWP_StepDepth_OUT 2 +#define JDWP_StepSize_MIN 0 +#define JDWP_StepSize_LINE 1 +#define JDWP_SuspendPolicy_NONE 0 +#define JDWP_SuspendPolicy_EVENT_THREAD 1 +#define JDWP_SuspendPolicy_ALL 2 +#define JDWP_InvokeOptions_INVOKE_SINGLE_THREADED 0x01 +#define JDWP_InvokeOptions_INVOKE_NONVIRTUAL 0x02 diff --git a/src/client/jni/jni/JavaVM.h b/src/client/jni/jni/JavaVM.h new file mode 100755 index 000000000000..6926ff912900 --- /dev/null +++ b/src/client/jni/jni/JavaVM.h @@ -0,0 +1,11 @@ +/* + * JavaVM.h + * + * Copyright (C) 1997-2001, Apple Computer, Inc. + * All Rights Reserved. + * + */ + +#import +#import + diff --git a/src/client/jni/jni/NSJavaConfiguration.h b/src/client/jni/jni/NSJavaConfiguration.h new file mode 100755 index 000000000000..e279340bfa19 --- /dev/null +++ b/src/client/jni/jni/NSJavaConfiguration.h @@ -0,0 +1,79 @@ +/* + * NSJavaConfiguration.h + * + * Copyright (c) 1997-2001, Apple Computer, Inc. + * All Rights Reserved. + * + * LaurentR- April, 2000 + * - added: + * NSDefaultJavaLibraryKey + * NSDefaultJavaDebugLibraryKey + * NSDefaultObjCJavaLibraryKey + * NSDefaultObjCJavaDebugLibraryKey + * NSJavaVMArgumentsKey + */ + +#import + +// The configuration dictionary contains a set of vendor-specific key/value +// pairs and a set of default key/value pairs. 
If no vendor is specified, +// NSJavaConfiguration uses the NSDefaultJavaVendorKey key to determine which +// vendor-specific dictionary should be searched before the top-level dictionary +// is searched. eg.: +/* + { + Vendor = sun; + default = { + DefaultClasspath = "/NextLibrary/Java"; + }; + next = { + Compiler = "/usr/bin/javac"; + VM = "/usr/bin/java"; + }; + sun = { + Compiler = "/NextLibrary/JDK/bin/javac"; + VM = "/NextLibrary/JDK/bin/java"; + }; + } +*/ +// In this case, if no vendor is specified, the `sun' mappings will be searched +// first. The value for `VM' would be "/NextLibrary/JDK/bin/java" and the value +// for `DefaultClasspath' would be "/NextLibrary/Java". +// +// This search pattern is applied to three dictionaries, in order: +// - the JavaConfiguration dictionary in the defaults for the application +// - the dictionary in the "JavaConfiguration" domain of the user defaults +// - the configuration file (/NextLibrary/Java/JavaConfig.plist). +// This permits per-application, per-user and per-system specifications. + + +extern NSString *NSDefaultJavaVendorKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +extern NSString *NSDefaultJavaVMKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +extern NSString *NSDefaultJavaCompilerKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +extern NSString *NSDefaultJavaClassPathKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +extern NSString *NSDefaultJavaLibraryKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +extern NSString *NSDefaultJavaDebugLibraryKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +extern NSString *NSDefaultObjCJavaLibraryKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +extern NSString *NSDefaultObjCJavaDebugLibraryKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +extern NSString *NSJavaVMArgumentsKey DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + + +@interface NSJavaConfiguration : NSObject +{ + NSString *_vendorName; +} + ++ (NSJavaConfiguration *) defaultConfiguration DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + ++ (NSJavaConfiguration *) configurationForVendor:(NSString *)vendorName DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; ++ (NSArray *) vendorNames DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +- init DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +- initWithVendor:(NSString *)vendorName DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +- (NSString *) vendorName DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +- valueForKey:(NSString *)keyName DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +- valueForKey:(NSString *)keyName expandEnvironmentVariables:(BOOL)flag DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +@end + diff --git a/src/client/jni/jni/NSJavaVirtualMachine.h b/src/client/jni/jni/NSJavaVirtualMachine.h new file mode 100755 index 000000000000..1e7d74c27b3a --- /dev/null +++ b/src/client/jni/jni/NSJavaVirtualMachine.h @@ -0,0 +1,61 @@ +/* + * NSJavaVirtualMachine.h + * + * Copyright (c) 1997-2001, Apple Computer, Inc. + * All Rights Reserved. + */ + +#import + +@interface NSJavaVirtualMachine : NSObject +{ +@private + void *_vmdata; +} + + +// Returns the default virtual machine - if necessary, calls alloc + init + ++ (id) defaultVirtualMachine DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +// Returns a class path. First checks NSProcessInfo for an environment variable +// called CLASSPATH and if that doesn't exist, uses NSJavaConfiguration to find +// the default class path. 
+ ++ (NSString *) defaultClassPath DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +// Note that any NSThreads spawned after this method returns will automatically +// be attached to the virtual machine. Likewise, it is not necessary to attach +// the thread that is actually creating the virtual machine. If you spawn a +// thread before creating the virtual machine, or if you use the cthread/pthread +// or any other non-NSThread api for creating a thread, you must explicitly +// attach those threads before messaging any Java object from that thread. +// This is most easily done by using the -attachCurrentThread method. +// Use -detachCurrentThread to detach explicitly attached threads when done. + +- initWithClassPath:(NSString *)classPath DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +- (void) attachCurrentThread DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +- (void) detachCurrentThread DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +- (Class)findClass:(NSString *)className DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; +- (Class)defineClass:(NSData *)javaClassData withName:(NSString *)className DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +@end + + +@interface NSObject (InstantiatingJavaObjects) + +// Instantiating java objects for when no -init/constructor mapping works. +// The class these methods are invoked on *must* be a class returned by the +// -findClass: method (or NSClassFromString() function), otherwise +// NSInvalidJavaClassException is raised. The signature is specified using the +// rather counter-intuitive format defined by the Java Virtual Machine +// specification. Try looking in JavaVM/vm-interface.h for help. + ++ (id) newWithSignature:(NSString *)signature, ... DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; ++ (id) newWithSignature:(NSString *)signature arguments:(va_list)args DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; + +@end + +extern NSString *NSInvalidJavaClassException DEPRECATED_IN_MAC_OS_X_VERSION_10_6_AND_LATER; diff --git a/src/client/jni/jni/jawt.h b/src/client/jni/jni/jawt.h new file mode 100755 index 000000000000..1057894ad9e6 --- /dev/null +++ b/src/client/jni/jni/jawt.h @@ -0,0 +1,278 @@ +/* + * @(#)jawt.h 1.11 05/11/17 + * + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + */ + +#ifndef _JAVASOFT_JAWT_H_ +#define _JAVASOFT_JAWT_H_ + +#include "jni.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * AWT native interface (new in JDK 1.3) + * + * The AWT native interface allows a native C or C++ application a means + * by which to access native structures in AWT. This is to facilitate moving + * legacy C and C++ applications to Java and to target the needs of the + * community who, at present, wish to do their own native rendering to canvases + * for performance reasons. Standard extensions such as Java3D also require a + * means to access the underlying native data structures of AWT. + * + * There may be future extensions to this API depending on demand. + * + * A VM does not have to implement this API in order to pass the JCK. + * It is recommended, however, that this API is implemented on VMs that support + * standard extensions, such as Java3D. + * + * Since this is a native API, any program which uses it cannot be considered + * 100% pure java. + */ + +/* + * AWT Native Drawing Surface (JAWT_DrawingSurface). + * + * For each platform, there is a native drawing surface structure. This + * platform-specific structure can be found in jawt_md.h. 
It is recommended + * that additional platforms follow the same model. It is also recommended + * that VMs on Win32 and Solaris support the existing structures in jawt_md.h. + * + ******************* + * EXAMPLE OF USAGE: + ******************* + * + * In Win32, a programmer wishes to access the HWND of a canvas to perform + * native rendering into it. The programmer has declared the paint() method + * for their canvas subclass to be native: + * + * + * MyCanvas.java: + * + * import java.awt.*; + * + * public class MyCanvas extends Canvas { + * + * static { + * System.loadLibrary("mylib"); + * } + * + * public native void paint(Graphics g); + * } + * + * + * myfile.c: + * + * #include "jawt_md.h" + * #include + * + * JNIEXPORT void JNICALL + * Java_MyCanvas_paint(JNIEnv* env, jobject canvas, jobject graphics) + * { + * JAWT awt; + * JAWT_DrawingSurface* ds; + * JAWT_DrawingSurfaceInfo* dsi; + * JAWT_Win32DrawingSurfaceInfo* dsi_win; + * jboolean result; + * jint lock; + * + * // Get the AWT + * awt.version = JAWT_VERSION_1_3; + * result = JAWT_GetAWT(env, &awt); + * assert(result != JNI_FALSE); + * + * // Get the drawing surface + * ds = awt.GetDrawingSurface(env, canvas); + * assert(ds != NULL); + * + * // Lock the drawing surface + * lock = ds->Lock(ds); + * assert((lock & JAWT_LOCK_ERROR) == 0); + * + * // Get the drawing surface info + * dsi = ds->GetDrawingSurfaceInfo(ds); + * + * // Get the platform-specific drawing info + * dsi_win = (JAWT_Win32DrawingSurfaceInfo*)dsi->platformInfo; + * + * ////////////////////////////// + * // !!! DO PAINTING HERE !!! // + * ////////////////////////////// + * + * // Free the drawing surface info + * ds->FreeDrawingSurfaceInfo(dsi); + * + * // Unlock the drawing surface + * ds->Unlock(ds); + * + * // Free the drawing surface + * awt.FreeDrawingSurface(ds); + * } + * + */ + +/* + * JAWT_Rectangle + * Structure for a native rectangle. + */ +typedef struct jawt_Rectangle { + jint x; + jint y; + jint width; + jint height; +} JAWT_Rectangle; + +struct jawt_DrawingSurface; + +/* + * JAWT_DrawingSurfaceInfo + * Structure for containing the underlying drawing information of a component. + */ +typedef struct jawt_DrawingSurfaceInfo { + /* + * Pointer to the platform-specific information. This can be safely + * cast to a JAWT_Win32DrawingSurfaceInfo on Windows or a + * JAWT_X11DrawingSurfaceInfo on Solaris. See jawt_md.h for details. + */ + void* platformInfo; + /* Cached pointer to the underlying drawing surface */ + struct jawt_DrawingSurface* ds; + /* Bounding rectangle of the drawing surface */ + JAWT_Rectangle bounds; + /* Number of rectangles in the clip */ + jint clipSize; + /* Clip rectangle array */ + JAWT_Rectangle* clip; +} JAWT_DrawingSurfaceInfo; + +#define JAWT_LOCK_ERROR 0x00000001 +#define JAWT_LOCK_CLIP_CHANGED 0x00000002 +#define JAWT_LOCK_BOUNDS_CHANGED 0x00000004 +#define JAWT_LOCK_SURFACE_CHANGED 0x00000008 + +/* + * JAWT_DrawingSurface + * Structure for containing the underlying drawing information of a component. + * All operations on a JAWT_DrawingSurface MUST be performed from the same + * thread as the call to GetDrawingSurface. + */ +typedef struct jawt_DrawingSurface { + /* + * Cached reference to the Java environment of the calling thread. + * If Lock(), Unlock(), GetDrawingSurfaceInfo() or + * FreeDrawingSurfaceInfo() are called from a different thread, + * this data member should be set before calling those functions. 
+ */ + JNIEnv* env; + /* Cached reference to the target object */ + jobject target; + /* + * Lock the surface of the target component for native rendering. + * When finished drawing, the surface must be unlocked with + * Unlock(). This function returns a bitmask with one or more of the + * following values: + * + * JAWT_LOCK_ERROR - When an error has occurred and the surface could not + * be locked. + * + * JAWT_LOCK_CLIP_CHANGED - When the clip region has changed. + * + * JAWT_LOCK_BOUNDS_CHANGED - When the bounds of the surface have changed. + * + * JAWT_LOCK_SURFACE_CHANGED - When the surface itself has changed + */ + jint (JNICALL *Lock) + (struct jawt_DrawingSurface* ds); + /* + * Get the drawing surface info. + * The value returned may be cached, but the values may change if + * additional calls to Lock() or Unlock() are made. + * Lock() must be called before this can return a valid value. + * Returns NULL if an error has occurred. + * When finished with the returned value, FreeDrawingSurfaceInfo must be + * called. + */ + JAWT_DrawingSurfaceInfo* (JNICALL *GetDrawingSurfaceInfo) + (struct jawt_DrawingSurface* ds); + /* + * Free the drawing surface info. + */ + void (JNICALL *FreeDrawingSurfaceInfo) + (JAWT_DrawingSurfaceInfo* dsi); + /* + * Unlock the drawing surface of the target component for native rendering. + */ + void (JNICALL *Unlock) + (struct jawt_DrawingSurface* ds); +} JAWT_DrawingSurface; + +/* + * JAWT + * Structure for containing native AWT functions. + */ +typedef struct jawt { + /* + * Version of this structure. This must always be set before + * calling JAWT_GetAWT() + */ + jint version; + /* + * Return a drawing surface from a target jobject. This value + * may be cached. + * Returns NULL if an error has occurred. + * Target must be a java.awt.Component (should be a Canvas + * or Window for native rendering). + * FreeDrawingSurface() must be called when finished with the + * returned JAWT_DrawingSurface. + */ + JAWT_DrawingSurface* (JNICALL *GetDrawingSurface) + (JNIEnv* env, jobject target); + /* + * Free the drawing surface allocated in GetDrawingSurface. + */ + void (JNICALL *FreeDrawingSurface) + (JAWT_DrawingSurface* ds); + /* + * Since 1.4 + * Locks the entire AWT for synchronization purposes + */ + void (JNICALL *Lock)(JNIEnv* env); + /* + * Since 1.4 + * Unlocks the entire AWT for synchronization purposes + */ + void (JNICALL *Unlock)(JNIEnv* env); + /* + * Since 1.4 + * Returns a reference to a java.awt.Component from a native + * platform handle. On Windows, this corresponds to an HWND; + * on Solaris and Linux, this is a Drawable. For other platforms, + * see the appropriate machine-dependent header file for a description. + * The reference returned by this function is a local + * reference that is only valid in this environment. + * This function returns a NULL reference if no component could be + * found with matching platform information. + */ + jobject (JNICALL *GetComponent)(JNIEnv* env, void* platformInfo); + +} JAWT; + +/* + * Get the AWT native structure. This function returns JNI_FALSE if + * an error occurs. 
+ */ +_JNI_IMPORT_OR_EXPORT_ __attribute__((deprecated)) +jboolean JNICALL JAWT_GetAWT(JNIEnv* env, JAWT* awt); + +#define JAWT_VERSION_1_3 0x00010003 +#define JAWT_VERSION_1_4 0x00010004 + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* !_JAVASOFT_JAWT_H_ */ diff --git a/src/client/jni/jni/jawt_md.h b/src/client/jni/jni/jawt_md.h new file mode 100755 index 000000000000..e3eed941c436 --- /dev/null +++ b/src/client/jni/jni/jawt_md.h @@ -0,0 +1,66 @@ +// +// jawt_md.h +// Copyright (c) 2002-2010 Apple Inc. All rights reserved. +// + +#ifndef _JAVASOFT_JAWT_MD_H_ +#define _JAVASOFT_JAWT_MD_H_ + +#include "jawt.h" + +#import +#import + + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * JAWT on Mac OS X has two rendering models; legacy NSView, and CALayer. + * + * The CALayer based model returns an object conforming to the JAWT_SurfaceLayers + * protocol in it's JAWT_DrawingSurfaceInfo->platformInfo pointer. A CALayer + * assigned to the "layer" property overlays the rectangle of the java.awt.Component. + * The client CALayer traces the rect of the Java component as it is moved and resized. + * + * If there is a superlayer for the entire window, it is also accessible via + * the "windowLayer" property. This layer is useful for embedding the Java + * window in other layer graphs. + * + * + * The legacy NSView model provides raw access to the NSView hierarchy which + * mirrors the Java component hierarchy. The legacy NSView drawing model is deprecated, + * and will not be available in future versions of Java for Mac OS X. + * + * Clients can opt-into the CALayer model by OR'ing the JAWT_MACOSX_USE_CALAYER into the requested JAWT version. + * + * JAWT awt; + * awt.version = JAWT_VERSION_1_4 | JAWT_MACOSX_USE_CALAYER; + * jboolean success = JAWT_GetAWT(env, &awt); + * + * Future versions of Java for Mac OS X will only support the CALayer model, + * and will not return a JAWT_MacOSXDrawingSurfaceInfo struct. + */ + +#define JAWT_MACOSX_USE_CALAYER 0x80000000 + +// CALayer-based rendering +@protocol JAWT_SurfaceLayers +@property (readwrite, retain) CALayer *layer; +@property (readonly) CALayer *windowLayer; +@end + + +// Legacy NSView-based rendering +typedef struct JAWT_MacOSXDrawingSurfaceInfo { + NSView *cocoaViewRef; // the view is guaranteed to be valid only for the duration of Component.paint method +} +JAWT_MacOSXDrawingSurfaceInfo; + + +#ifdef __cplusplus +} +#endif + +#endif /* !_JAVASOFT_JAWT_MD_H_ */ diff --git a/src/client/jni/jni/jdwpTransport.h b/src/client/jni/jni/jdwpTransport.h new file mode 100755 index 000000000000..fc46ca48ecb5 --- /dev/null +++ b/src/client/jni/jni/jdwpTransport.h @@ -0,0 +1,237 @@ +/* + * @(#)jdwpTransport.h 1.8 05/11/17 + * + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + */ + +/* + * Java Debug Wire Protocol Transport Service Provider Interface. + */ + +#ifndef JDWPTRANSPORT_H +#define JDWPTRANSPORT_H + +#include "jni.h" + +enum { + JDWPTRANSPORT_VERSION_1_0 = 0x00010000 +}; + +#ifdef __cplusplus +extern "C" { +#endif + +struct jdwpTransportNativeInterface_; + +struct _jdwpTransportEnv; + +#ifdef __cplusplus +typedef _jdwpTransportEnv jdwpTransportEnv; +#else +typedef const struct jdwpTransportNativeInterface_ *jdwpTransportEnv; +#endif /* __cplusplus */ + +/* + * Errors. Universal errors with JVMTI/JVMDI equivalents keep the + * values the same. 
+ */ +typedef enum { + JDWPTRANSPORT_ERROR_NONE = 0, + JDWPTRANSPORT_ERROR_ILLEGAL_ARGUMENT = 103, + JDWPTRANSPORT_ERROR_OUT_OF_MEMORY = 110, + JDWPTRANSPORT_ERROR_INTERNAL = 113, + JDWPTRANSPORT_ERROR_ILLEGAL_STATE = 201, + JDWPTRANSPORT_ERROR_IO_ERROR = 202, + JDWPTRANSPORT_ERROR_TIMEOUT = 203, + JDWPTRANSPORT_ERROR_MSG_NOT_AVAILABLE = 204 +} jdwpTransportError; + + +/* + * Structure to define capabilities + */ +typedef struct { + unsigned int can_timeout_attach :1; + unsigned int can_timeout_accept :1; + unsigned int can_timeout_handshake :1; + unsigned int reserved3 :1; + unsigned int reserved4 :1; + unsigned int reserved5 :1; + unsigned int reserved6 :1; + unsigned int reserved7 :1; + unsigned int reserved8 :1; + unsigned int reserved9 :1; + unsigned int reserved10 :1; + unsigned int reserved11 :1; + unsigned int reserved12 :1; + unsigned int reserved13 :1; + unsigned int reserved14 :1; + unsigned int reserved15 :1; +} JDWPTransportCapabilities; + + +/* + * Structures to define packet layout. + * + * See: http://java.sun.com/j2se/1.5/docs/guide/jpda/jdwp-spec.html + */ + +enum { + JDWPTRANSPORT_FLAGS_NONE = 0x0, + JDWPTRANSPORT_FLAGS_REPLY = 0x80 +}; + +typedef struct { + jint len; + jint id; + jbyte flags; + jbyte cmdSet; + jbyte cmd; + jbyte *data; +} jdwpCmdPacket; + +typedef struct { + jint len; + jint id; + jbyte flags; + jshort errorCode; + jbyte *data; +} jdwpReplyPacket; + +typedef struct { + union { + jdwpCmdPacket cmd; + jdwpReplyPacket reply; + } type; +} jdwpPacket; + +/* + * JDWP functions called by the transport. + */ +typedef struct jdwpTransportCallback { + void *(*alloc)(jint numBytes); /* Call this for all allocations */ + void (*free)(void *buffer); /* Call this for all deallocations */ +} jdwpTransportCallback; + +typedef jint (JNICALL *jdwpTransport_OnLoad_t)(JavaVM *jvm, + jdwpTransportCallback *callback, + jint version, + jdwpTransportEnv** env); + + + +/* Function Interface */ + +struct jdwpTransportNativeInterface_ { + /* 1 : RESERVED */ + void *reserved1; + + /* 2 : Get Capabilities */ + jdwpTransportError (JNICALL *GetCapabilities)(jdwpTransportEnv* env, + JDWPTransportCapabilities *capabilities_ptr); + + /* 3 : Attach */ + jdwpTransportError (JNICALL *Attach)(jdwpTransportEnv* env, + const char* address, + jlong attach_timeout, + jlong handshake_timeout); + + /* 4: StartListening */ + jdwpTransportError (JNICALL *StartListening)(jdwpTransportEnv* env, + const char* address, + char** actual_address); + + /* 5: StopListening */ + jdwpTransportError (JNICALL *StopListening)(jdwpTransportEnv* env); + + /* 6: Accept */ + jdwpTransportError (JNICALL *Accept)(jdwpTransportEnv* env, + jlong accept_timeout, + jlong handshake_timeout); + + /* 7: IsOpen */ + jboolean (JNICALL *IsOpen)(jdwpTransportEnv* env); + + /* 8: Close */ + jdwpTransportError (JNICALL *Close)(jdwpTransportEnv* env); + + /* 9: ReadPacket */ + jdwpTransportError (JNICALL *ReadPacket)(jdwpTransportEnv* env, + jdwpPacket *pkt); + + /* 10: Write Packet */ + jdwpTransportError (JNICALL *WritePacket)(jdwpTransportEnv* env, + const jdwpPacket* pkt); + + /* 11: GetLastError */ + jdwpTransportError (JNICALL *GetLastError)(jdwpTransportEnv* env, + char** error); + +}; + + +/* + * Use inlined functions so that C++ code can use syntax such as + * env->Attach("mymachine:5000", 10*1000, 0); + * + * rather than using C's :- + * + * (*env)->Attach(env, "mymachine:5000", 10*1000, 0); + */ +struct _jdwpTransportEnv { + const struct jdwpTransportNativeInterface_ *functions; +#ifdef __cplusplus + + jdwpTransportError 
GetCapabilities(JDWPTransportCapabilities *capabilities_ptr) { + return functions->GetCapabilities(this, capabilities_ptr); + } + + jdwpTransportError Attach(const char* address, jlong attach_timeout, + jlong handshake_timeout) { + return functions->Attach(this, address, attach_timeout, handshake_timeout); + } + + jdwpTransportError StartListening(const char* address, + char** actual_address) { + return functions->StartListening(this, address, actual_address); + } + + jdwpTransportError StopListening(void) { + return functions->StopListening(this); + } + + jdwpTransportError Accept(jlong accept_timeout, jlong handshake_timeout) { + return functions->Accept(this, accept_timeout, handshake_timeout); + } + + jboolean IsOpen(void) { + return functions->IsOpen(this); + } + + jdwpTransportError Close(void) { + return functions->Close(this); + } + + jdwpTransportError ReadPacket(jdwpPacket *pkt) { + return functions->ReadPacket(this, pkt); + } + + jdwpTransportError WritePacket(const jdwpPacket* pkt) { + return functions->WritePacket(this, pkt); + } + + jdwpTransportError GetLastError(char** error) { + return functions->GetLastError(this, error); + } + + +#endif /* __cplusplus */ +}; + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* JDWPTRANSPORT_H */ + diff --git a/src/client/jni/jni/jni.h b/src/client/jni/jni/jni.h new file mode 100755 index 000000000000..cff1a02fcf32 --- /dev/null +++ b/src/client/jni/jni/jni.h @@ -0,0 +1,1961 @@ +/* + * @(#)jni.h 1.62 06/02/02 + * + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + */ + +/* + * We used part of Netscape's Java Runtime Interface (JRI) as the starting + * point of our design and implementation. + */ + +/****************************************************************************** + * Java Runtime Interface + * Copyright (c) 1996 Netscape Communications Corporation. All rights reserved. 
+ *****************************************************************************/ + +#ifndef _JAVASOFT_JNI_H_ +#define _JAVASOFT_JNI_H_ + +#include +#include + +/* jni_md.h contains the machine-dependent typedefs for jbyte, jint + and jlong */ + +#include "jni_md.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * JNI Types + */ + +#ifndef JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H + +typedef unsigned char jboolean; +typedef unsigned short jchar; +typedef short jshort; +typedef float jfloat; +typedef double jdouble; + +typedef jint jsize; + +#ifdef __cplusplus + +class _jobject {}; +class _jclass : public _jobject {}; +class _jthrowable : public _jobject {}; +class _jstring : public _jobject {}; +class _jarray : public _jobject {}; +class _jbooleanArray : public _jarray {}; +class _jbyteArray : public _jarray {}; +class _jcharArray : public _jarray {}; +class _jshortArray : public _jarray {}; +class _jintArray : public _jarray {}; +class _jlongArray : public _jarray {}; +class _jfloatArray : public _jarray {}; +class _jdoubleArray : public _jarray {}; +class _jobjectArray : public _jarray {}; + +typedef _jobject *jobject; +typedef _jclass *jclass; +typedef _jthrowable *jthrowable; +typedef _jstring *jstring; +typedef _jarray *jarray; +typedef _jbooleanArray *jbooleanArray; +typedef _jbyteArray *jbyteArray; +typedef _jcharArray *jcharArray; +typedef _jshortArray *jshortArray; +typedef _jintArray *jintArray; +typedef _jlongArray *jlongArray; +typedef _jfloatArray *jfloatArray; +typedef _jdoubleArray *jdoubleArray; +typedef _jobjectArray *jobjectArray; + +#else + +struct _jobject; + +typedef struct _jobject *jobject; +typedef jobject jclass; +typedef jobject jthrowable; +typedef jobject jstring; +typedef jobject jarray; +typedef jarray jbooleanArray; +typedef jarray jbyteArray; +typedef jarray jcharArray; +typedef jarray jshortArray; +typedef jarray jintArray; +typedef jarray jlongArray; +typedef jarray jfloatArray; +typedef jarray jdoubleArray; +typedef jarray jobjectArray; + +#endif + +typedef jobject jweak; + +typedef union jvalue { + jboolean z; + jbyte b; + jchar c; + jshort s; + jint i; + jlong j; + jfloat f; + jdouble d; + jobject l; +} jvalue; + +struct _jfieldID; +typedef struct _jfieldID *jfieldID; + +struct _jmethodID; +typedef struct _jmethodID *jmethodID; + +/* Return values from jobjectRefType */ +typedef enum _jobjectType { + JNIInvalidRefType = 0, + JNILocalRefType = 1, + JNIGlobalRefType = 2, + JNIWeakGlobalRefType = 3 +} jobjectRefType; + + +#endif /* JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H */ + +/* + * jboolean constants + */ + +#define JNI_FALSE 0 +#define JNI_TRUE 1 + +/* + * possible return values for JNI functions. + */ + +#define JNI_OK 0 /* success */ +#define JNI_ERR (-1) /* unknown error */ +#define JNI_EDETACHED (-2) /* thread detached from the VM */ +#define JNI_EVERSION (-3) /* JNI version error */ +#define JNI_ENOMEM (-4) /* not enough memory */ +#define JNI_EEXIST (-5) /* VM already created */ +#define JNI_EINVAL (-6) /* invalid arguments */ + +/* + * used in ReleaseScalarArrayElements + */ + +#define JNI_COMMIT 1 +#define JNI_ABORT 2 + +/* + * used in RegisterNatives to describe native method name, signature, + * and function pointer. + */ + +typedef struct { + char *name; + char *signature; + void *fnPtr; +} JNINativeMethod; + +/* + * JNI Native Method Interface. 
+ */ + +struct JNINativeInterface_; + +struct JNIEnv_; + +#ifdef __cplusplus +typedef JNIEnv_ JNIEnv; +#else +typedef const struct JNINativeInterface_ *JNIEnv; +#endif + +/* + * JNI Invocation Interface. + */ + +struct JNIInvokeInterface_; + +struct JavaVM_; + +#ifdef __cplusplus +typedef JavaVM_ JavaVM; +#else +typedef const struct JNIInvokeInterface_ *JavaVM; +#endif + +struct JNINativeInterface_ { + void *reserved0; + void *reserved1; + void *reserved2; + + void *reserved3; + +#if !TARGET_RT_MAC_CFM && defined(__ppc__) + void* cfm_vectors[225]; +#endif /* !TARGET_RT_MAC_CFM && defined(__ppc__) */ + + jint (JNICALL *GetVersion)(JNIEnv *env); + + jclass (JNICALL *DefineClass) + (JNIEnv *env, const char *name, jobject loader, const jbyte *buf, + jsize len); + jclass (JNICALL *FindClass) + (JNIEnv *env, const char *name); + + jmethodID (JNICALL *FromReflectedMethod) + (JNIEnv *env, jobject method); + jfieldID (JNICALL *FromReflectedField) + (JNIEnv *env, jobject field); + + jobject (JNICALL *ToReflectedMethod) + (JNIEnv *env, jclass cls, jmethodID methodID, jboolean isStatic); + + jclass (JNICALL *GetSuperclass) + (JNIEnv *env, jclass sub); + jboolean (JNICALL *IsAssignableFrom) + (JNIEnv *env, jclass sub, jclass sup); + + jobject (JNICALL *ToReflectedField) + (JNIEnv *env, jclass cls, jfieldID fieldID, jboolean isStatic); + + jint (JNICALL *Throw) + (JNIEnv *env, jthrowable obj); + jint (JNICALL *ThrowNew) + (JNIEnv *env, jclass clazz, const char *msg); + jthrowable (JNICALL *ExceptionOccurred) + (JNIEnv *env); + void (JNICALL *ExceptionDescribe) + (JNIEnv *env); + void (JNICALL *ExceptionClear) + (JNIEnv *env); + void (JNICALL *FatalError) + (JNIEnv *env, const char *msg); + + jint (JNICALL *PushLocalFrame) + (JNIEnv *env, jint capacity); + jobject (JNICALL *PopLocalFrame) + (JNIEnv *env, jobject result); + + jobject (JNICALL *NewGlobalRef) + (JNIEnv *env, jobject lobj); + void (JNICALL *DeleteGlobalRef) + (JNIEnv *env, jobject gref); + void (JNICALL *DeleteLocalRef) + (JNIEnv *env, jobject obj); + jboolean (JNICALL *IsSameObject) + (JNIEnv *env, jobject obj1, jobject obj2); + jobject (JNICALL *NewLocalRef) + (JNIEnv *env, jobject ref); + jint (JNICALL *EnsureLocalCapacity) + (JNIEnv *env, jint capacity); + + jobject (JNICALL *AllocObject) + (JNIEnv *env, jclass clazz); + jobject (JNICALL *NewObject) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jobject (JNICALL *NewObjectV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jobject (JNICALL *NewObjectA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jclass (JNICALL *GetObjectClass) + (JNIEnv *env, jobject obj); + jboolean (JNICALL *IsInstanceOf) + (JNIEnv *env, jobject obj, jclass clazz); + + jmethodID (JNICALL *GetMethodID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + + jobject (JNICALL *CallObjectMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jobject (JNICALL *CallObjectMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jobject (JNICALL *CallObjectMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args); + + jboolean (JNICALL *CallBooleanMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jboolean (JNICALL *CallBooleanMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jboolean (JNICALL *CallBooleanMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args); + + jbyte (JNICALL *CallByteMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, 
...); + jbyte (JNICALL *CallByteMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jbyte (JNICALL *CallByteMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jchar (JNICALL *CallCharMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jchar (JNICALL *CallCharMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jchar (JNICALL *CallCharMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jshort (JNICALL *CallShortMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jshort (JNICALL *CallShortMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jshort (JNICALL *CallShortMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jint (JNICALL *CallIntMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jint (JNICALL *CallIntMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jint (JNICALL *CallIntMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jlong (JNICALL *CallLongMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jlong (JNICALL *CallLongMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jlong (JNICALL *CallLongMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jfloat (JNICALL *CallFloatMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jfloat (JNICALL *CallFloatMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jfloat (JNICALL *CallFloatMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + jdouble (JNICALL *CallDoubleMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + jdouble (JNICALL *CallDoubleMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + jdouble (JNICALL *CallDoubleMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args); + + void (JNICALL *CallVoidMethod) + (JNIEnv *env, jobject obj, jmethodID methodID, ...); + void (JNICALL *CallVoidMethodV) + (JNIEnv *env, jobject obj, jmethodID methodID, va_list args); + void (JNICALL *CallVoidMethodA) + (JNIEnv *env, jobject obj, jmethodID methodID, const jvalue * args); + + jobject (JNICALL *CallNonvirtualObjectMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jobject (JNICALL *CallNonvirtualObjectMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jobject (JNICALL *CallNonvirtualObjectMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue * args); + + jboolean (JNICALL *CallNonvirtualBooleanMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jboolean (JNICALL *CallNonvirtualBooleanMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jboolean (JNICALL *CallNonvirtualBooleanMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue * args); + + jbyte (JNICALL *CallNonvirtualByteMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jbyte (JNICALL *CallNonvirtualByteMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jbyte (JNICALL *CallNonvirtualByteMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jchar (JNICALL *CallNonvirtualCharMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + 
jchar (JNICALL *CallNonvirtualCharMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jchar (JNICALL *CallNonvirtualCharMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jshort (JNICALL *CallNonvirtualShortMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jshort (JNICALL *CallNonvirtualShortMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jshort (JNICALL *CallNonvirtualShortMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jint (JNICALL *CallNonvirtualIntMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jint (JNICALL *CallNonvirtualIntMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jint (JNICALL *CallNonvirtualIntMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jlong (JNICALL *CallNonvirtualLongMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jlong (JNICALL *CallNonvirtualLongMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jlong (JNICALL *CallNonvirtualLongMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jfloat (JNICALL *CallNonvirtualFloatMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jfloat (JNICALL *CallNonvirtualFloatMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jfloat (JNICALL *CallNonvirtualFloatMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + jdouble (JNICALL *CallNonvirtualDoubleMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + jdouble (JNICALL *CallNonvirtualDoubleMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + jdouble (JNICALL *CallNonvirtualDoubleMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue *args); + + void (JNICALL *CallNonvirtualVoidMethod) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, ...); + void (JNICALL *CallNonvirtualVoidMethodV) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + va_list args); + void (JNICALL *CallNonvirtualVoidMethodA) + (JNIEnv *env, jobject obj, jclass clazz, jmethodID methodID, + const jvalue * args); + + jfieldID (JNICALL *GetFieldID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + + jobject (JNICALL *GetObjectField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jboolean (JNICALL *GetBooleanField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jbyte (JNICALL *GetByteField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jchar (JNICALL *GetCharField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jshort (JNICALL *GetShortField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jint (JNICALL *GetIntField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jlong (JNICALL *GetLongField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jfloat (JNICALL *GetFloatField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + jdouble (JNICALL *GetDoubleField) + (JNIEnv *env, jobject obj, jfieldID fieldID); + + void (JNICALL *SetObjectField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jobject val); + void (JNICALL *SetBooleanField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jboolean val); + void (JNICALL 
*SetByteField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jbyte val); + void (JNICALL *SetCharField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jchar val); + void (JNICALL *SetShortField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jshort val); + void (JNICALL *SetIntField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jint val); + void (JNICALL *SetLongField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jlong val); + void (JNICALL *SetFloatField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jfloat val); + void (JNICALL *SetDoubleField) + (JNIEnv *env, jobject obj, jfieldID fieldID, jdouble val); + + jmethodID (JNICALL *GetStaticMethodID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + + jobject (JNICALL *CallStaticObjectMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jobject (JNICALL *CallStaticObjectMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jobject (JNICALL *CallStaticObjectMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jboolean (JNICALL *CallStaticBooleanMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jboolean (JNICALL *CallStaticBooleanMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jboolean (JNICALL *CallStaticBooleanMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jbyte (JNICALL *CallStaticByteMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jbyte (JNICALL *CallStaticByteMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jbyte (JNICALL *CallStaticByteMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jchar (JNICALL *CallStaticCharMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jchar (JNICALL *CallStaticCharMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jchar (JNICALL *CallStaticCharMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jshort (JNICALL *CallStaticShortMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jshort (JNICALL *CallStaticShortMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jshort (JNICALL *CallStaticShortMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jint (JNICALL *CallStaticIntMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jint (JNICALL *CallStaticIntMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jint (JNICALL *CallStaticIntMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jlong (JNICALL *CallStaticLongMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jlong (JNICALL *CallStaticLongMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jlong (JNICALL *CallStaticLongMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jfloat (JNICALL *CallStaticFloatMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jfloat (JNICALL *CallStaticFloatMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jfloat (JNICALL *CallStaticFloatMethodA) + (JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + jdouble (JNICALL *CallStaticDoubleMethod) + (JNIEnv *env, jclass clazz, jmethodID methodID, ...); + jdouble (JNICALL *CallStaticDoubleMethodV) + (JNIEnv *env, jclass clazz, jmethodID methodID, va_list args); + jdouble (JNICALL *CallStaticDoubleMethodA) + 
(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args); + + void (JNICALL *CallStaticVoidMethod) + (JNIEnv *env, jclass cls, jmethodID methodID, ...); + void (JNICALL *CallStaticVoidMethodV) + (JNIEnv *env, jclass cls, jmethodID methodID, va_list args); + void (JNICALL *CallStaticVoidMethodA) + (JNIEnv *env, jclass cls, jmethodID methodID, const jvalue * args); + + jfieldID (JNICALL *GetStaticFieldID) + (JNIEnv *env, jclass clazz, const char *name, const char *sig); + jobject (JNICALL *GetStaticObjectField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jboolean (JNICALL *GetStaticBooleanField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jbyte (JNICALL *GetStaticByteField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jchar (JNICALL *GetStaticCharField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jshort (JNICALL *GetStaticShortField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jint (JNICALL *GetStaticIntField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jlong (JNICALL *GetStaticLongField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jfloat (JNICALL *GetStaticFloatField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + jdouble (JNICALL *GetStaticDoubleField) + (JNIEnv *env, jclass clazz, jfieldID fieldID); + + void (JNICALL *SetStaticObjectField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jobject value); + void (JNICALL *SetStaticBooleanField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jboolean value); + void (JNICALL *SetStaticByteField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jbyte value); + void (JNICALL *SetStaticCharField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jchar value); + void (JNICALL *SetStaticShortField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jshort value); + void (JNICALL *SetStaticIntField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jint value); + void (JNICALL *SetStaticLongField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jlong value); + void (JNICALL *SetStaticFloatField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jfloat value); + void (JNICALL *SetStaticDoubleField) + (JNIEnv *env, jclass clazz, jfieldID fieldID, jdouble value); + + jstring (JNICALL *NewString) + (JNIEnv *env, const jchar *unicode, jsize len); + jsize (JNICALL *GetStringLength) + (JNIEnv *env, jstring str); + const jchar *(JNICALL *GetStringChars) + (JNIEnv *env, jstring str, jboolean *isCopy); + void (JNICALL *ReleaseStringChars) + (JNIEnv *env, jstring str, const jchar *chars); + + jstring (JNICALL *NewStringUTF) + (JNIEnv *env, const char *utf); + jsize (JNICALL *GetStringUTFLength) + (JNIEnv *env, jstring str); + const char* (JNICALL *GetStringUTFChars) + (JNIEnv *env, jstring str, jboolean *isCopy); + void (JNICALL *ReleaseStringUTFChars) + (JNIEnv *env, jstring str, const char* chars); + + + jsize (JNICALL *GetArrayLength) + (JNIEnv *env, jarray array); + + jobjectArray (JNICALL *NewObjectArray) + (JNIEnv *env, jsize len, jclass clazz, jobject init); + jobject (JNICALL *GetObjectArrayElement) + (JNIEnv *env, jobjectArray array, jsize index); + void (JNICALL *SetObjectArrayElement) + (JNIEnv *env, jobjectArray array, jsize index, jobject val); + + jbooleanArray (JNICALL *NewBooleanArray) + (JNIEnv *env, jsize len); + jbyteArray (JNICALL *NewByteArray) + (JNIEnv *env, jsize len); + jcharArray (JNICALL *NewCharArray) + (JNIEnv *env, jsize len); + jshortArray (JNICALL *NewShortArray) + (JNIEnv *env, jsize len); + jintArray (JNICALL *NewIntArray) + (JNIEnv *env, jsize len); + jlongArray (JNICALL 
*NewLongArray) + (JNIEnv *env, jsize len); + jfloatArray (JNICALL *NewFloatArray) + (JNIEnv *env, jsize len); + jdoubleArray (JNICALL *NewDoubleArray) + (JNIEnv *env, jsize len); + + jboolean * (JNICALL *GetBooleanArrayElements) + (JNIEnv *env, jbooleanArray array, jboolean *isCopy); + jbyte * (JNICALL *GetByteArrayElements) + (JNIEnv *env, jbyteArray array, jboolean *isCopy); + jchar * (JNICALL *GetCharArrayElements) + (JNIEnv *env, jcharArray array, jboolean *isCopy); + jshort * (JNICALL *GetShortArrayElements) + (JNIEnv *env, jshortArray array, jboolean *isCopy); + jint * (JNICALL *GetIntArrayElements) + (JNIEnv *env, jintArray array, jboolean *isCopy); + jlong * (JNICALL *GetLongArrayElements) + (JNIEnv *env, jlongArray array, jboolean *isCopy); + jfloat * (JNICALL *GetFloatArrayElements) + (JNIEnv *env, jfloatArray array, jboolean *isCopy); + jdouble * (JNICALL *GetDoubleArrayElements) + (JNIEnv *env, jdoubleArray array, jboolean *isCopy); + + void (JNICALL *ReleaseBooleanArrayElements) + (JNIEnv *env, jbooleanArray array, jboolean *elems, jint mode); + void (JNICALL *ReleaseByteArrayElements) + (JNIEnv *env, jbyteArray array, jbyte *elems, jint mode); + void (JNICALL *ReleaseCharArrayElements) + (JNIEnv *env, jcharArray array, jchar *elems, jint mode); + void (JNICALL *ReleaseShortArrayElements) + (JNIEnv *env, jshortArray array, jshort *elems, jint mode); + void (JNICALL *ReleaseIntArrayElements) + (JNIEnv *env, jintArray array, jint *elems, jint mode); + void (JNICALL *ReleaseLongArrayElements) + (JNIEnv *env, jlongArray array, jlong *elems, jint mode); + void (JNICALL *ReleaseFloatArrayElements) + (JNIEnv *env, jfloatArray array, jfloat *elems, jint mode); + void (JNICALL *ReleaseDoubleArrayElements) + (JNIEnv *env, jdoubleArray array, jdouble *elems, jint mode); + + void (JNICALL *GetBooleanArrayRegion) + (JNIEnv *env, jbooleanArray array, jsize start, jsize l, jboolean *buf); + void (JNICALL *GetByteArrayRegion) + (JNIEnv *env, jbyteArray array, jsize start, jsize len, jbyte *buf); + void (JNICALL *GetCharArrayRegion) + (JNIEnv *env, jcharArray array, jsize start, jsize len, jchar *buf); + void (JNICALL *GetShortArrayRegion) + (JNIEnv *env, jshortArray array, jsize start, jsize len, jshort *buf); + void (JNICALL *GetIntArrayRegion) + (JNIEnv *env, jintArray array, jsize start, jsize len, jint *buf); + void (JNICALL *GetLongArrayRegion) + (JNIEnv *env, jlongArray array, jsize start, jsize len, jlong *buf); + void (JNICALL *GetFloatArrayRegion) + (JNIEnv *env, jfloatArray array, jsize start, jsize len, jfloat *buf); + void (JNICALL *GetDoubleArrayRegion) + (JNIEnv *env, jdoubleArray array, jsize start, jsize len, jdouble *buf); + + void (JNICALL *SetBooleanArrayRegion) + (JNIEnv *env, jbooleanArray array, jsize start, jsize l, const jboolean *buf); + void (JNICALL *SetByteArrayRegion) + (JNIEnv *env, jbyteArray array, jsize start, jsize len, const jbyte *buf); + void (JNICALL *SetCharArrayRegion) + (JNIEnv *env, jcharArray array, jsize start, jsize len, const jchar *buf); + void (JNICALL *SetShortArrayRegion) + (JNIEnv *env, jshortArray array, jsize start, jsize len, const jshort *buf); + void (JNICALL *SetIntArrayRegion) + (JNIEnv *env, jintArray array, jsize start, jsize len, const jint *buf); + void (JNICALL *SetLongArrayRegion) + (JNIEnv *env, jlongArray array, jsize start, jsize len, const jlong *buf); + void (JNICALL *SetFloatArrayRegion) + (JNIEnv *env, jfloatArray array, jsize start, jsize len, const jfloat *buf); + void (JNICALL *SetDoubleArrayRegion) + (JNIEnv *env, 
jdoubleArray array, jsize start, jsize len, const jdouble *buf); + + jint (JNICALL *RegisterNatives) + (JNIEnv *env, jclass clazz, const JNINativeMethod *methods, + jint nMethods); + jint (JNICALL *UnregisterNatives) + (JNIEnv *env, jclass clazz); + + jint (JNICALL *MonitorEnter) + (JNIEnv *env, jobject obj); + jint (JNICALL *MonitorExit) + (JNIEnv *env, jobject obj); + + jint (JNICALL *GetJavaVM) + (JNIEnv *env, JavaVM **vm); + + void (JNICALL *GetStringRegion) + (JNIEnv *env, jstring str, jsize start, jsize len, jchar *buf); + void (JNICALL *GetStringUTFRegion) + (JNIEnv *env, jstring str, jsize start, jsize len, char *buf); + + void * (JNICALL *GetPrimitiveArrayCritical) + (JNIEnv *env, jarray array, jboolean *isCopy); + void (JNICALL *ReleasePrimitiveArrayCritical) + (JNIEnv *env, jarray array, void *carray, jint mode); + + const jchar * (JNICALL *GetStringCritical) + (JNIEnv *env, jstring string, jboolean *isCopy); + void (JNICALL *ReleaseStringCritical) + (JNIEnv *env, jstring string, const jchar *cstring); + + jweak (JNICALL *NewWeakGlobalRef) + (JNIEnv *env, jobject obj); + void (JNICALL *DeleteWeakGlobalRef) + (JNIEnv *env, jweak ref); + + jboolean (JNICALL *ExceptionCheck) + (JNIEnv *env); + + jobject (JNICALL *NewDirectByteBuffer) + (JNIEnv* env, void* address, jlong capacity); + void* (JNICALL *GetDirectBufferAddress) + (JNIEnv* env, jobject buf); + jlong (JNICALL *GetDirectBufferCapacity) + (JNIEnv* env, jobject buf); + + /* New JNI 1.6 Features */ + + jobjectRefType (JNICALL *GetObjectRefType) + (JNIEnv* env, jobject obj); + + #if TARGET_RT_MAC_CFM && defined(__ppc__) + void* real_functions[228]; + #endif /* TARGET_RT_MAC_CFM && defined(__ppc__) */ +}; + +/* + * We use inlined functions for C++ so that programmers can write: + * + * env->FindClass("java/lang/String") + * + * in C++ rather than: + * + * (*env)->FindClass(env, "java/lang/String") + * + * in C. 
+ */ + +struct JNIEnv_ { + const struct JNINativeInterface_ *functions; +#ifdef __cplusplus + + jint GetVersion() { + return functions->GetVersion(this); + } + jclass DefineClass(const char *name, jobject loader, const jbyte *buf, + jsize len) { + return functions->DefineClass(this, name, loader, buf, len); + } + jclass FindClass(const char *name) { + return functions->FindClass(this, name); + } + jmethodID FromReflectedMethod(jobject method) { + return functions->FromReflectedMethod(this,method); + } + jfieldID FromReflectedField(jobject field) { + return functions->FromReflectedField(this,field); + } + + jobject ToReflectedMethod(jclass cls, jmethodID methodID, jboolean isStatic) { + return functions->ToReflectedMethod(this, cls, methodID, isStatic); + } + + jclass GetSuperclass(jclass sub) { + return functions->GetSuperclass(this, sub); + } + jboolean IsAssignableFrom(jclass sub, jclass sup) { + return functions->IsAssignableFrom(this, sub, sup); + } + + jobject ToReflectedField(jclass cls, jfieldID fieldID, jboolean isStatic) { + return functions->ToReflectedField(this,cls,fieldID,isStatic); + } + + jint Throw(jthrowable obj) { + return functions->Throw(this, obj); + } + jint ThrowNew(jclass clazz, const char *msg) { + return functions->ThrowNew(this, clazz, msg); + } + jthrowable ExceptionOccurred() { + return functions->ExceptionOccurred(this); + } + void ExceptionDescribe() { + functions->ExceptionDescribe(this); + } + void ExceptionClear() { + functions->ExceptionClear(this); + } + void FatalError(const char *msg) { + functions->FatalError(this, msg); + } + + jint PushLocalFrame(jint capacity) { + return functions->PushLocalFrame(this,capacity); + } + jobject PopLocalFrame(jobject result) { + return functions->PopLocalFrame(this,result); + } + + jobject NewGlobalRef(jobject lobj) { + return functions->NewGlobalRef(this,lobj); + } + void DeleteGlobalRef(jobject gref) { + functions->DeleteGlobalRef(this,gref); + } + void DeleteLocalRef(jobject obj) { + functions->DeleteLocalRef(this, obj); + } + + jboolean IsSameObject(jobject obj1, jobject obj2) { + return functions->IsSameObject(this,obj1,obj2); + } + + jobject NewLocalRef(jobject ref) { + return functions->NewLocalRef(this,ref); + } + jint EnsureLocalCapacity(jint capacity) { + return functions->EnsureLocalCapacity(this,capacity); + } + + jobject AllocObject(jclass clazz) { + return functions->AllocObject(this,clazz); + } + jobject NewObject(jclass clazz, jmethodID methodID, ...) { + va_list args; + jobject result; + va_start(args, methodID); + result = functions->NewObjectV(this,clazz,methodID,args); + va_end(args); + return result; + } + jobject NewObjectV(jclass clazz, jmethodID methodID, + va_list args) { + return functions->NewObjectV(this,clazz,methodID,args); + } + jobject NewObjectA(jclass clazz, jmethodID methodID, + const jvalue *args) { + return functions->NewObjectA(this,clazz,methodID,args); + } + + jclass GetObjectClass(jobject obj) { + return functions->GetObjectClass(this,obj); + } + jboolean IsInstanceOf(jobject obj, jclass clazz) { + return functions->IsInstanceOf(this,obj,clazz); + } + + jmethodID GetMethodID(jclass clazz, const char *name, + const char *sig) { + return functions->GetMethodID(this,clazz,name,sig); + } + + jobject CallObjectMethod(jobject obj, jmethodID methodID, ...) 
{ + va_list args; + jobject result; + va_start(args,methodID); + result = functions->CallObjectMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jobject CallObjectMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallObjectMethodV(this,obj,methodID,args); + } + jobject CallObjectMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallObjectMethodA(this,obj,methodID,args); + } + + jboolean CallBooleanMethod(jobject obj, + jmethodID methodID, ...) { + va_list args; + jboolean result; + va_start(args,methodID); + result = functions->CallBooleanMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jboolean CallBooleanMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallBooleanMethodV(this,obj,methodID,args); + } + jboolean CallBooleanMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallBooleanMethodA(this,obj,methodID, args); + } + + jbyte CallByteMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jbyte result; + va_start(args,methodID); + result = functions->CallByteMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jbyte CallByteMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallByteMethodV(this,obj,methodID,args); + } + jbyte CallByteMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallByteMethodA(this,obj,methodID,args); + } + + jchar CallCharMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jchar result; + va_start(args,methodID); + result = functions->CallCharMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jchar CallCharMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallCharMethodV(this,obj,methodID,args); + } + jchar CallCharMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallCharMethodA(this,obj,methodID,args); + } + + jshort CallShortMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jshort result; + va_start(args,methodID); + result = functions->CallShortMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jshort CallShortMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallShortMethodV(this,obj,methodID,args); + } + jshort CallShortMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallShortMethodA(this,obj,methodID,args); + } + + jint CallIntMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jint result; + va_start(args,methodID); + result = functions->CallIntMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jint CallIntMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallIntMethodV(this,obj,methodID,args); + } + jint CallIntMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallIntMethodA(this,obj,methodID,args); + } + + jlong CallLongMethod(jobject obj, jmethodID methodID, ...) 
{ + va_list args; + jlong result; + va_start(args,methodID); + result = functions->CallLongMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jlong CallLongMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallLongMethodV(this,obj,methodID,args); + } + jlong CallLongMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallLongMethodA(this,obj,methodID,args); + } + + jfloat CallFloatMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jfloat result; + va_start(args,methodID); + result = functions->CallFloatMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jfloat CallFloatMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallFloatMethodV(this,obj,methodID,args); + } + jfloat CallFloatMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallFloatMethodA(this,obj,methodID,args); + } + + jdouble CallDoubleMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + jdouble result; + va_start(args,methodID); + result = functions->CallDoubleMethodV(this,obj,methodID,args); + va_end(args); + return result; + } + jdouble CallDoubleMethodV(jobject obj, jmethodID methodID, + va_list args) { + return functions->CallDoubleMethodV(this,obj,methodID,args); + } + jdouble CallDoubleMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + return functions->CallDoubleMethodA(this,obj,methodID,args); + } + + void CallVoidMethod(jobject obj, jmethodID methodID, ...) { + va_list args; + va_start(args,methodID); + functions->CallVoidMethodV(this,obj,methodID,args); + va_end(args); + } + void CallVoidMethodV(jobject obj, jmethodID methodID, + va_list args) { + functions->CallVoidMethodV(this,obj,methodID,args); + } + void CallVoidMethodA(jobject obj, jmethodID methodID, + const jvalue * args) { + functions->CallVoidMethodA(this,obj,methodID,args); + } + + jobject CallNonvirtualObjectMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jobject result; + va_start(args,methodID); + result = functions->CallNonvirtualObjectMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jobject CallNonvirtualObjectMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualObjectMethodV(this,obj,clazz, + methodID,args); + } + jobject CallNonvirtualObjectMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualObjectMethodA(this,obj,clazz, + methodID,args); + } + + jboolean CallNonvirtualBooleanMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jboolean result; + va_start(args,methodID); + result = functions->CallNonvirtualBooleanMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jboolean CallNonvirtualBooleanMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualBooleanMethodV(this,obj,clazz, + methodID,args); + } + jboolean CallNonvirtualBooleanMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualBooleanMethodA(this,obj,clazz, + methodID, args); + } + + jbyte CallNonvirtualByteMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jbyte result; + va_start(args,methodID); + result = functions->CallNonvirtualByteMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jbyte CallNonvirtualByteMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualByteMethodV(this,obj,clazz, + methodID,args); + } + jbyte CallNonvirtualByteMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualByteMethodA(this,obj,clazz, + methodID,args); + } + + jchar CallNonvirtualCharMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jchar result; + va_start(args,methodID); + result = functions->CallNonvirtualCharMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jchar CallNonvirtualCharMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualCharMethodV(this,obj,clazz, + methodID,args); + } + jchar CallNonvirtualCharMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualCharMethodA(this,obj,clazz, + methodID,args); + } + + jshort CallNonvirtualShortMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jshort result; + va_start(args,methodID); + result = functions->CallNonvirtualShortMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jshort CallNonvirtualShortMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualShortMethodV(this,obj,clazz, + methodID,args); + } + jshort CallNonvirtualShortMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualShortMethodA(this,obj,clazz, + methodID,args); + } + + jint CallNonvirtualIntMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jint result; + va_start(args,methodID); + result = functions->CallNonvirtualIntMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jint CallNonvirtualIntMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualIntMethodV(this,obj,clazz, + methodID,args); + } + jint CallNonvirtualIntMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualIntMethodA(this,obj,clazz, + methodID,args); + } + + jlong CallNonvirtualLongMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jlong result; + va_start(args,methodID); + result = functions->CallNonvirtualLongMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jlong CallNonvirtualLongMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallNonvirtualLongMethodV(this,obj,clazz, + methodID,args); + } + jlong CallNonvirtualLongMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue * args) { + return functions->CallNonvirtualLongMethodA(this,obj,clazz, + methodID,args); + } + + jfloat CallNonvirtualFloatMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jfloat result; + va_start(args,methodID); + result = functions->CallNonvirtualFloatMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jfloat CallNonvirtualFloatMethodV(jobject obj, jclass clazz, + jmethodID methodID, + va_list args) { + return functions->CallNonvirtualFloatMethodV(this,obj,clazz, + methodID,args); + } + jfloat CallNonvirtualFloatMethodA(jobject obj, jclass clazz, + jmethodID methodID, + const jvalue * args) { + return functions->CallNonvirtualFloatMethodA(this,obj,clazz, + methodID,args); + } + + jdouble CallNonvirtualDoubleMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + jdouble result; + va_start(args,methodID); + result = functions->CallNonvirtualDoubleMethodV(this,obj,clazz, + methodID,args); + va_end(args); + return result; + } + jdouble CallNonvirtualDoubleMethodV(jobject obj, jclass clazz, + jmethodID methodID, + va_list args) { + return functions->CallNonvirtualDoubleMethodV(this,obj,clazz, + methodID,args); + } + jdouble CallNonvirtualDoubleMethodA(jobject obj, jclass clazz, + jmethodID methodID, + const jvalue * args) { + return functions->CallNonvirtualDoubleMethodA(this,obj,clazz, + methodID,args); + } + + void CallNonvirtualVoidMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) { + va_list args; + va_start(args,methodID); + functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args); + va_end(args); + } + void CallNonvirtualVoidMethodV(jobject obj, jclass clazz, + jmethodID methodID, + va_list args) { + functions->CallNonvirtualVoidMethodV(this,obj,clazz,methodID,args); + } + void CallNonvirtualVoidMethodA(jobject obj, jclass clazz, + jmethodID methodID, + const jvalue * args) { + functions->CallNonvirtualVoidMethodA(this,obj,clazz,methodID,args); + } + + jfieldID GetFieldID(jclass clazz, const char *name, + const char *sig) { + return functions->GetFieldID(this,clazz,name,sig); + } + + jobject GetObjectField(jobject obj, jfieldID fieldID) { + return functions->GetObjectField(this,obj,fieldID); + } + jboolean GetBooleanField(jobject obj, jfieldID fieldID) { + return functions->GetBooleanField(this,obj,fieldID); + } + jbyte GetByteField(jobject obj, jfieldID fieldID) { + return functions->GetByteField(this,obj,fieldID); + } + jchar GetCharField(jobject obj, jfieldID fieldID) { + return functions->GetCharField(this,obj,fieldID); + } + jshort GetShortField(jobject obj, jfieldID fieldID) { + return functions->GetShortField(this,obj,fieldID); + } + jint GetIntField(jobject obj, jfieldID fieldID) { + return functions->GetIntField(this,obj,fieldID); + } + jlong GetLongField(jobject obj, jfieldID fieldID) { + return functions->GetLongField(this,obj,fieldID); + } + jfloat GetFloatField(jobject obj, jfieldID fieldID) { + return functions->GetFloatField(this,obj,fieldID); + } + jdouble GetDoubleField(jobject obj, jfieldID fieldID) { + return functions->GetDoubleField(this,obj,fieldID); + } + + void SetObjectField(jobject obj, jfieldID fieldID, jobject val) { + functions->SetObjectField(this,obj,fieldID,val); + } + void SetBooleanField(jobject obj, jfieldID fieldID, + jboolean val) { + functions->SetBooleanField(this,obj,fieldID,val); + } + void SetByteField(jobject obj, jfieldID fieldID, + jbyte val) { + functions->SetByteField(this,obj,fieldID,val); + } + void SetCharField(jobject obj, jfieldID fieldID, + jchar val) { + functions->SetCharField(this,obj,fieldID,val); + } + void SetShortField(jobject obj, jfieldID fieldID, + jshort val) { + 
functions->SetShortField(this,obj,fieldID,val); + } + void SetIntField(jobject obj, jfieldID fieldID, + jint val) { + functions->SetIntField(this,obj,fieldID,val); + } + void SetLongField(jobject obj, jfieldID fieldID, + jlong val) { + functions->SetLongField(this,obj,fieldID,val); + } + void SetFloatField(jobject obj, jfieldID fieldID, + jfloat val) { + functions->SetFloatField(this,obj,fieldID,val); + } + void SetDoubleField(jobject obj, jfieldID fieldID, + jdouble val) { + functions->SetDoubleField(this,obj,fieldID,val); + } + + jmethodID GetStaticMethodID(jclass clazz, const char *name, + const char *sig) { + return functions->GetStaticMethodID(this,clazz,name,sig); + } + + jobject CallStaticObjectMethod(jclass clazz, jmethodID methodID, + ...) { + va_list args; + jobject result; + va_start(args,methodID); + result = functions->CallStaticObjectMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jobject CallStaticObjectMethodV(jclass clazz, jmethodID methodID, + va_list args) { + return functions->CallStaticObjectMethodV(this,clazz,methodID,args); + } + jobject CallStaticObjectMethodA(jclass clazz, jmethodID methodID, + const jvalue *args) { + return functions->CallStaticObjectMethodA(this,clazz,methodID,args); + } + + jboolean CallStaticBooleanMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jboolean result; + va_start(args,methodID); + result = functions->CallStaticBooleanMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jboolean CallStaticBooleanMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticBooleanMethodV(this,clazz,methodID,args); + } + jboolean CallStaticBooleanMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticBooleanMethodA(this,clazz,methodID,args); + } + + jbyte CallStaticByteMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jbyte result; + va_start(args,methodID); + result = functions->CallStaticByteMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jbyte CallStaticByteMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticByteMethodV(this,clazz,methodID,args); + } + jbyte CallStaticByteMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticByteMethodA(this,clazz,methodID,args); + } + + jchar CallStaticCharMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jchar result; + va_start(args,methodID); + result = functions->CallStaticCharMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jchar CallStaticCharMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticCharMethodV(this,clazz,methodID,args); + } + jchar CallStaticCharMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticCharMethodA(this,clazz,methodID,args); + } + + jshort CallStaticShortMethod(jclass clazz, + jmethodID methodID, ...) 
{ + va_list args; + jshort result; + va_start(args,methodID); + result = functions->CallStaticShortMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jshort CallStaticShortMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticShortMethodV(this,clazz,methodID,args); + } + jshort CallStaticShortMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticShortMethodA(this,clazz,methodID,args); + } + + jint CallStaticIntMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jint result; + va_start(args,methodID); + result = functions->CallStaticIntMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jint CallStaticIntMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticIntMethodV(this,clazz,methodID,args); + } + jint CallStaticIntMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticIntMethodA(this,clazz,methodID,args); + } + + jlong CallStaticLongMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jlong result; + va_start(args,methodID); + result = functions->CallStaticLongMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jlong CallStaticLongMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticLongMethodV(this,clazz,methodID,args); + } + jlong CallStaticLongMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticLongMethodA(this,clazz,methodID,args); + } + + jfloat CallStaticFloatMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jfloat result; + va_start(args,methodID); + result = functions->CallStaticFloatMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jfloat CallStaticFloatMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticFloatMethodV(this,clazz,methodID,args); + } + jfloat CallStaticFloatMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticFloatMethodA(this,clazz,methodID,args); + } + + jdouble CallStaticDoubleMethod(jclass clazz, + jmethodID methodID, ...) { + va_list args; + jdouble result; + va_start(args,methodID); + result = functions->CallStaticDoubleMethodV(this,clazz,methodID,args); + va_end(args); + return result; + } + jdouble CallStaticDoubleMethodV(jclass clazz, + jmethodID methodID, va_list args) { + return functions->CallStaticDoubleMethodV(this,clazz,methodID,args); + } + jdouble CallStaticDoubleMethodA(jclass clazz, + jmethodID methodID, const jvalue *args) { + return functions->CallStaticDoubleMethodA(this,clazz,methodID,args); + } + + void CallStaticVoidMethod(jclass cls, jmethodID methodID, ...) 
{ + va_list args; + va_start(args,methodID); + functions->CallStaticVoidMethodV(this,cls,methodID,args); + va_end(args); + } + void CallStaticVoidMethodV(jclass cls, jmethodID methodID, + va_list args) { + functions->CallStaticVoidMethodV(this,cls,methodID,args); + } + void CallStaticVoidMethodA(jclass cls, jmethodID methodID, + const jvalue * args) { + functions->CallStaticVoidMethodA(this,cls,methodID,args); + } + + jfieldID GetStaticFieldID(jclass clazz, const char *name, + const char *sig) { + return functions->GetStaticFieldID(this,clazz,name,sig); + } + jobject GetStaticObjectField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticObjectField(this,clazz,fieldID); + } + jboolean GetStaticBooleanField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticBooleanField(this,clazz,fieldID); + } + jbyte GetStaticByteField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticByteField(this,clazz,fieldID); + } + jchar GetStaticCharField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticCharField(this,clazz,fieldID); + } + jshort GetStaticShortField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticShortField(this,clazz,fieldID); + } + jint GetStaticIntField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticIntField(this,clazz,fieldID); + } + jlong GetStaticLongField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticLongField(this,clazz,fieldID); + } + jfloat GetStaticFloatField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticFloatField(this,clazz,fieldID); + } + jdouble GetStaticDoubleField(jclass clazz, jfieldID fieldID) { + return functions->GetStaticDoubleField(this,clazz,fieldID); + } + + void SetStaticObjectField(jclass clazz, jfieldID fieldID, + jobject value) { + functions->SetStaticObjectField(this,clazz,fieldID,value); + } + void SetStaticBooleanField(jclass clazz, jfieldID fieldID, + jboolean value) { + functions->SetStaticBooleanField(this,clazz,fieldID,value); + } + void SetStaticByteField(jclass clazz, jfieldID fieldID, + jbyte value) { + functions->SetStaticByteField(this,clazz,fieldID,value); + } + void SetStaticCharField(jclass clazz, jfieldID fieldID, + jchar value) { + functions->SetStaticCharField(this,clazz,fieldID,value); + } + void SetStaticShortField(jclass clazz, jfieldID fieldID, + jshort value) { + functions->SetStaticShortField(this,clazz,fieldID,value); + } + void SetStaticIntField(jclass clazz, jfieldID fieldID, + jint value) { + functions->SetStaticIntField(this,clazz,fieldID,value); + } + void SetStaticLongField(jclass clazz, jfieldID fieldID, + jlong value) { + functions->SetStaticLongField(this,clazz,fieldID,value); + } + void SetStaticFloatField(jclass clazz, jfieldID fieldID, + jfloat value) { + functions->SetStaticFloatField(this,clazz,fieldID,value); + } + void SetStaticDoubleField(jclass clazz, jfieldID fieldID, + jdouble value) { + functions->SetStaticDoubleField(this,clazz,fieldID,value); + } + + jstring NewString(const jchar *unicode, jsize len) { + return functions->NewString(this,unicode,len); + } + jsize GetStringLength(jstring str) { + return functions->GetStringLength(this,str); + } + const jchar *GetStringChars(jstring str, jboolean *isCopy) { + return functions->GetStringChars(this,str,isCopy); + } + void ReleaseStringChars(jstring str, const jchar *chars) { + functions->ReleaseStringChars(this,str,chars); + } + + jstring NewStringUTF(const char *utf) { + return functions->NewStringUTF(this,utf); + } + jsize GetStringUTFLength(jstring str) { + 
return functions->GetStringUTFLength(this,str); + } + const char* GetStringUTFChars(jstring str, jboolean *isCopy) { + return functions->GetStringUTFChars(this,str,isCopy); + } + void ReleaseStringUTFChars(jstring str, const char* chars) { + functions->ReleaseStringUTFChars(this,str,chars); + } + + jsize GetArrayLength(jarray array) { + return functions->GetArrayLength(this,array); + } + + jobjectArray NewObjectArray(jsize len, jclass clazz, + jobject init) { + return functions->NewObjectArray(this,len,clazz,init); + } + jobject GetObjectArrayElement(jobjectArray array, jsize index) { + return functions->GetObjectArrayElement(this,array,index); + } + void SetObjectArrayElement(jobjectArray array, jsize index, + jobject val) { + functions->SetObjectArrayElement(this,array,index,val); + } + + jbooleanArray NewBooleanArray(jsize len) { + return functions->NewBooleanArray(this,len); + } + jbyteArray NewByteArray(jsize len) { + return functions->NewByteArray(this,len); + } + jcharArray NewCharArray(jsize len) { + return functions->NewCharArray(this,len); + } + jshortArray NewShortArray(jsize len) { + return functions->NewShortArray(this,len); + } + jintArray NewIntArray(jsize len) { + return functions->NewIntArray(this,len); + } + jlongArray NewLongArray(jsize len) { + return functions->NewLongArray(this,len); + } + jfloatArray NewFloatArray(jsize len) { + return functions->NewFloatArray(this,len); + } + jdoubleArray NewDoubleArray(jsize len) { + return functions->NewDoubleArray(this,len); + } + + jboolean * GetBooleanArrayElements(jbooleanArray array, jboolean *isCopy) { + return functions->GetBooleanArrayElements(this,array,isCopy); + } + jbyte * GetByteArrayElements(jbyteArray array, jboolean *isCopy) { + return functions->GetByteArrayElements(this,array,isCopy); + } + jchar * GetCharArrayElements(jcharArray array, jboolean *isCopy) { + return functions->GetCharArrayElements(this,array,isCopy); + } + jshort * GetShortArrayElements(jshortArray array, jboolean *isCopy) { + return functions->GetShortArrayElements(this,array,isCopy); + } + jint * GetIntArrayElements(jintArray array, jboolean *isCopy) { + return functions->GetIntArrayElements(this,array,isCopy); + } + jlong * GetLongArrayElements(jlongArray array, jboolean *isCopy) { + return functions->GetLongArrayElements(this,array,isCopy); + } + jfloat * GetFloatArrayElements(jfloatArray array, jboolean *isCopy) { + return functions->GetFloatArrayElements(this,array,isCopy); + } + jdouble * GetDoubleArrayElements(jdoubleArray array, jboolean *isCopy) { + return functions->GetDoubleArrayElements(this,array,isCopy); + } + + void ReleaseBooleanArrayElements(jbooleanArray array, + jboolean *elems, + jint mode) { + functions->ReleaseBooleanArrayElements(this,array,elems,mode); + } + void ReleaseByteArrayElements(jbyteArray array, + jbyte *elems, + jint mode) { + functions->ReleaseByteArrayElements(this,array,elems,mode); + } + void ReleaseCharArrayElements(jcharArray array, + jchar *elems, + jint mode) { + functions->ReleaseCharArrayElements(this,array,elems,mode); + } + void ReleaseShortArrayElements(jshortArray array, + jshort *elems, + jint mode) { + functions->ReleaseShortArrayElements(this,array,elems,mode); + } + void ReleaseIntArrayElements(jintArray array, + jint *elems, + jint mode) { + functions->ReleaseIntArrayElements(this,array,elems,mode); + } + void ReleaseLongArrayElements(jlongArray array, + jlong *elems, + jint mode) { + functions->ReleaseLongArrayElements(this,array,elems,mode); + } + void ReleaseFloatArrayElements(jfloatArray 
array, + jfloat *elems, + jint mode) { + functions->ReleaseFloatArrayElements(this,array,elems,mode); + } + void ReleaseDoubleArrayElements(jdoubleArray array, + jdouble *elems, + jint mode) { + functions->ReleaseDoubleArrayElements(this,array,elems,mode); + } + + void GetBooleanArrayRegion(jbooleanArray array, + jsize start, jsize len, jboolean *buf) { + functions->GetBooleanArrayRegion(this,array,start,len,buf); + } + void GetByteArrayRegion(jbyteArray array, + jsize start, jsize len, jbyte *buf) { + functions->GetByteArrayRegion(this,array,start,len,buf); + } + void GetCharArrayRegion(jcharArray array, + jsize start, jsize len, jchar *buf) { + functions->GetCharArrayRegion(this,array,start,len,buf); + } + void GetShortArrayRegion(jshortArray array, + jsize start, jsize len, jshort *buf) { + functions->GetShortArrayRegion(this,array,start,len,buf); + } + void GetIntArrayRegion(jintArray array, + jsize start, jsize len, jint *buf) { + functions->GetIntArrayRegion(this,array,start,len,buf); + } + void GetLongArrayRegion(jlongArray array, + jsize start, jsize len, jlong *buf) { + functions->GetLongArrayRegion(this,array,start,len,buf); + } + void GetFloatArrayRegion(jfloatArray array, + jsize start, jsize len, jfloat *buf) { + functions->GetFloatArrayRegion(this,array,start,len,buf); + } + void GetDoubleArrayRegion(jdoubleArray array, + jsize start, jsize len, jdouble *buf) { + functions->GetDoubleArrayRegion(this,array,start,len,buf); + } + + void SetBooleanArrayRegion(jbooleanArray array, jsize start, jsize len, + const jboolean *buf) { + functions->SetBooleanArrayRegion(this,array,start,len,buf); + } + void SetByteArrayRegion(jbyteArray array, jsize start, jsize len, + const jbyte *buf) { + functions->SetByteArrayRegion(this,array,start,len,buf); + } + void SetCharArrayRegion(jcharArray array, jsize start, jsize len, + const jchar *buf) { + functions->SetCharArrayRegion(this,array,start,len,buf); + } + void SetShortArrayRegion(jshortArray array, jsize start, jsize len, + const jshort *buf) { + functions->SetShortArrayRegion(this,array,start,len,buf); + } + void SetIntArrayRegion(jintArray array, jsize start, jsize len, + const jint *buf) { + functions->SetIntArrayRegion(this,array,start,len,buf); + } + void SetLongArrayRegion(jlongArray array, jsize start, jsize len, + const jlong *buf) { + functions->SetLongArrayRegion(this,array,start,len,buf); + } + void SetFloatArrayRegion(jfloatArray array, jsize start, jsize len, + const jfloat *buf) { + functions->SetFloatArrayRegion(this,array,start,len,buf); + } + void SetDoubleArrayRegion(jdoubleArray array, jsize start, jsize len, + const jdouble *buf) { + functions->SetDoubleArrayRegion(this,array,start,len,buf); + } + + jint RegisterNatives(jclass clazz, const JNINativeMethod *methods, + jint nMethods) { + return functions->RegisterNatives(this,clazz,methods,nMethods); + } + jint UnregisterNatives(jclass clazz) { + return functions->UnregisterNatives(this,clazz); + } + + jint MonitorEnter(jobject obj) { + return functions->MonitorEnter(this,obj); + } + jint MonitorExit(jobject obj) { + return functions->MonitorExit(this,obj); + } + + jint GetJavaVM(JavaVM **vm) { + return functions->GetJavaVM(this,vm); + } + + void GetStringRegion(jstring str, jsize start, jsize len, jchar *buf) { + functions->GetStringRegion(this,str,start,len,buf); + } + void GetStringUTFRegion(jstring str, jsize start, jsize len, char *buf) { + functions->GetStringUTFRegion(this,str,start,len,buf); + } + + void * GetPrimitiveArrayCritical(jarray array, jboolean *isCopy) { 
+ return functions->GetPrimitiveArrayCritical(this,array,isCopy); + } + void ReleasePrimitiveArrayCritical(jarray array, void *carray, jint mode) { + functions->ReleasePrimitiveArrayCritical(this,array,carray,mode); + } + + const jchar * GetStringCritical(jstring string, jboolean *isCopy) { + return functions->GetStringCritical(this,string,isCopy); + } + void ReleaseStringCritical(jstring string, const jchar *cstring) { + functions->ReleaseStringCritical(this,string,cstring); + } + + jweak NewWeakGlobalRef(jobject obj) { + return functions->NewWeakGlobalRef(this,obj); + } + void DeleteWeakGlobalRef(jweak ref) { + functions->DeleteWeakGlobalRef(this,ref); + } + + jboolean ExceptionCheck() { + return functions->ExceptionCheck(this); + } + + jobject NewDirectByteBuffer(void* address, jlong capacity) { + return functions->NewDirectByteBuffer(this, address, capacity); + } + void* GetDirectBufferAddress(jobject buf) { + return functions->GetDirectBufferAddress(this, buf); + } + jlong GetDirectBufferCapacity(jobject buf) { + return functions->GetDirectBufferCapacity(this, buf); + } + jobjectRefType GetObjectRefType(jobject obj) { + return functions->GetObjectRefType(this, obj); + } + +#endif /* __cplusplus */ +}; + +typedef struct JavaVMOption { + char *optionString; + void *extraInfo; +} JavaVMOption; + +typedef struct JavaVMInitArgs { + jint version; + + jint nOptions; + JavaVMOption *options; + jboolean ignoreUnrecognized; +} JavaVMInitArgs; + +typedef struct JavaVMAttachArgs { + jint version; + + char *name; + jobject group; +} JavaVMAttachArgs; + +/* These will be VM-specific. */ + +#define JDK1_2 +#define JDK1_4 + +/* End VM-specific. */ + +struct JNIInvokeInterface_ { + void *reserved0; + void *reserved1; + void *reserved2; + +#if !TARGET_RT_MAC_CFM && defined(__ppc__) + void* cfm_vectors[4]; +#endif /* !TARGET_RT_MAC_CFM && defined(__ppc__) */ + + jint (JNICALL *DestroyJavaVM)(JavaVM *vm); + + jint (JNICALL *AttachCurrentThread)(JavaVM *vm, void **penv, void *args); + + jint (JNICALL *DetachCurrentThread)(JavaVM *vm); + + jint (JNICALL *GetEnv)(JavaVM *vm, void **penv, jint version); + + jint (JNICALL *AttachCurrentThreadAsDaemon)(JavaVM *vm, void **penv, void *args); + +#if TARGET_RT_MAC_CFM && defined(__ppc__) + void* real_functions[5]; +#endif /* TARGET_RT_MAC_CFM && defined(__ppc__) */ +}; + +struct JavaVM_ { + const struct JNIInvokeInterface_ *functions; +#ifdef __cplusplus + + jint DestroyJavaVM() { + return functions->DestroyJavaVM(this); + } + jint AttachCurrentThread(void **penv, void *args) { + return functions->AttachCurrentThread(this, penv, args); + } + jint DetachCurrentThread() { + return functions->DetachCurrentThread(this); + } + + jint GetEnv(void **penv, jint version) { + return functions->GetEnv(this, penv, version); + } + jint AttachCurrentThreadAsDaemon(void **penv, void *args) { + return functions->AttachCurrentThreadAsDaemon(this, penv, args); + } +#endif +}; + +#ifdef _JNI_IMPLEMENTATION_ +#define _JNI_IMPORT_OR_EXPORT_ JNIEXPORT +#else +#define _JNI_IMPORT_OR_EXPORT_ JNIIMPORT +#endif +_JNI_IMPORT_OR_EXPORT_ __attribute__((deprecated)) jint JNICALL +JNI_GetDefaultJavaVMInitArgs(void *args); + +_JNI_IMPORT_OR_EXPORT_ __attribute__((deprecated)) jint JNICALL +JNI_CreateJavaVM(JavaVM **pvm, void **penv, void *args); + +_JNI_IMPORT_OR_EXPORT_ __attribute__((deprecated)) jint JNICALL +JNI_GetCreatedJavaVMs(JavaVM **, jsize, jsize *); + +/* Defined by native libraries. 
*/ +JNIEXPORT jint JNICALL +JNI_OnLoad(JavaVM *vm, void *reserved); + +JNIEXPORT void JNICALL +JNI_OnUnload(JavaVM *vm, void *reserved); + +#define JNI_VERSION_1_1 0x00010001 +#define JNI_VERSION_1_2 0x00010002 +#define JNI_VERSION_1_4 0x00010004 +#define JNI_VERSION_1_6 0x00010006 + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* !_JAVASOFT_JNI_H_ */ + + + diff --git a/src/client/jni/jni/jni_md.h b/src/client/jni/jni/jni_md.h new file mode 100755 index 000000000000..21cc90b33802 --- /dev/null +++ b/src/client/jni/jni/jni_md.h @@ -0,0 +1,23 @@ +/* + * @(#)jni_md.h 1.19 05/11/17 + * + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + */ + +#ifndef _JAVASOFT_JNI_MD_H_ +#define _JAVASOFT_JNI_MD_H_ + +#define JNIEXPORT __attribute__((visibility("default"))) +#define JNIIMPORT +#define JNICALL + +#if defined(__LP64__) && __LP64__ /* for -Wundef */ +typedef int jint; +#else +typedef long jint; +#endif +typedef long long jlong; +typedef signed char jbyte; + +#endif /* !_JAVASOFT_JNI_MD_H_ */ diff --git a/src/client/jni/jni/jvmti.h b/src/client/jni/jni/jvmti.h new file mode 100755 index 000000000000..a0e1f0444e71 --- /dev/null +++ b/src/client/jni/jni/jvmti.h @@ -0,0 +1,2504 @@ +#ifdef USE_PRAGMA_IDENT_HDR +#pragma ident "@(#)jvmtiLib.xsl 1.38 06/08/02 23:22:31 JVM" +#endif +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + */ + + /* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */ + + + /* Include file for the Java(tm) Virtual Machine Tool Interface */ + +#ifndef _JAVA_JVMTI_H_ +#define _JAVA_JVMTI_H_ + +#include "jni.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum { + JVMTI_VERSION_1 = 0x30010000, + JVMTI_VERSION_1_0 = 0x30010000, + JVMTI_VERSION_1_1 = 0x30010100, + + JVMTI_VERSION = 0x30000000 + (1 * 0x10000) + (1 * 0x100) + 102 /* version: 1.1.102 */ +}; + +JNIEXPORT jint JNICALL __attribute__((deprecated)) +Agent_OnLoad(JavaVM *vm, char *options, void *reserved); + +JNIEXPORT jint JNICALL __attribute__((deprecated)) +Agent_OnAttach(JavaVM* vm, char* options, void* reserved); + +JNIEXPORT void JNICALL __attribute__((deprecated)) +Agent_OnUnload(JavaVM *vm); + + /* Forward declaration of the environment */ + +struct _jvmtiEnv; + +struct jvmtiInterface_1_; + +#ifdef __cplusplus +typedef _jvmtiEnv jvmtiEnv; +#else +typedef const struct jvmtiInterface_1_ *jvmtiEnv; +#endif /* __cplusplus */ + +/* Derived Base Types */ + +typedef jobject jthread; +typedef jobject jthreadGroup; +typedef jlong jlocation; +struct _jrawMonitorID; +typedef struct _jrawMonitorID *jrawMonitorID; +typedef struct JNINativeInterface_ jniNativeInterface; + + /* Constants */ + + + /* Thread State Flags */ + +enum { + JVMTI_THREAD_STATE_ALIVE = 0x0001, + JVMTI_THREAD_STATE_TERMINATED = 0x0002, + JVMTI_THREAD_STATE_RUNNABLE = 0x0004, + JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER = 0x0400, + JVMTI_THREAD_STATE_WAITING = 0x0080, + JVMTI_THREAD_STATE_WAITING_INDEFINITELY = 0x0010, + JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT = 0x0020, + JVMTI_THREAD_STATE_SLEEPING = 0x0040, + JVMTI_THREAD_STATE_IN_OBJECT_WAIT = 0x0100, + JVMTI_THREAD_STATE_PARKED = 0x0200, + JVMTI_THREAD_STATE_SUSPENDED = 0x100000, + JVMTI_THREAD_STATE_INTERRUPTED = 0x200000, + JVMTI_THREAD_STATE_IN_NATIVE = 0x400000, + JVMTI_THREAD_STATE_VENDOR_1 = 0x10000000, + JVMTI_THREAD_STATE_VENDOR_2 = 0x20000000, + JVMTI_THREAD_STATE_VENDOR_3 = 0x40000000 +}; + + /* 
java.lang.Thread.State Conversion Masks */ + +enum { + JVMTI_JAVA_LANG_THREAD_STATE_MASK = JVMTI_THREAD_STATE_TERMINATED | JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_RUNNABLE | JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_INDEFINITELY | JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT, + JVMTI_JAVA_LANG_THREAD_STATE_NEW = 0, + JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED = JVMTI_THREAD_STATE_TERMINATED, + JVMTI_JAVA_LANG_THREAD_STATE_RUNNABLE = JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_RUNNABLE, + JVMTI_JAVA_LANG_THREAD_STATE_BLOCKED = JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER, + JVMTI_JAVA_LANG_THREAD_STATE_WAITING = JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_INDEFINITELY, + JVMTI_JAVA_LANG_THREAD_STATE_TIMED_WAITING = JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_WAITING | JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT +}; + + /* Thread Priority Constants */ + +enum { + JVMTI_THREAD_MIN_PRIORITY = 1, + JVMTI_THREAD_NORM_PRIORITY = 5, + JVMTI_THREAD_MAX_PRIORITY = 10 +}; + + /* Heap Filter Flags */ + +enum { + JVMTI_HEAP_FILTER_TAGGED = 0x4, + JVMTI_HEAP_FILTER_UNTAGGED = 0x8, + JVMTI_HEAP_FILTER_CLASS_TAGGED = 0x10, + JVMTI_HEAP_FILTER_CLASS_UNTAGGED = 0x20 +}; + + /* Heap Visit Control Flags */ + +enum { + JVMTI_VISIT_OBJECTS = 0x100, + JVMTI_VISIT_ABORT = 0x8000 +}; + + /* Heap Reference Enumeration */ + +typedef enum { + JVMTI_HEAP_REFERENCE_CLASS = 1, + JVMTI_HEAP_REFERENCE_FIELD = 2, + JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT = 3, + JVMTI_HEAP_REFERENCE_CLASS_LOADER = 4, + JVMTI_HEAP_REFERENCE_SIGNERS = 5, + JVMTI_HEAP_REFERENCE_PROTECTION_DOMAIN = 6, + JVMTI_HEAP_REFERENCE_INTERFACE = 7, + JVMTI_HEAP_REFERENCE_STATIC_FIELD = 8, + JVMTI_HEAP_REFERENCE_CONSTANT_POOL = 9, + JVMTI_HEAP_REFERENCE_SUPERCLASS = 10, + JVMTI_HEAP_REFERENCE_JNI_GLOBAL = 21, + JVMTI_HEAP_REFERENCE_SYSTEM_CLASS = 22, + JVMTI_HEAP_REFERENCE_MONITOR = 23, + JVMTI_HEAP_REFERENCE_STACK_LOCAL = 24, + JVMTI_HEAP_REFERENCE_JNI_LOCAL = 25, + JVMTI_HEAP_REFERENCE_THREAD = 26, + JVMTI_HEAP_REFERENCE_OTHER = 27 +} jvmtiHeapReferenceKind; + + /* Primitive Type Enumeration */ + +typedef enum { + JVMTI_PRIMITIVE_TYPE_BOOLEAN = 90, + JVMTI_PRIMITIVE_TYPE_BYTE = 66, + JVMTI_PRIMITIVE_TYPE_CHAR = 67, + JVMTI_PRIMITIVE_TYPE_SHORT = 83, + JVMTI_PRIMITIVE_TYPE_INT = 73, + JVMTI_PRIMITIVE_TYPE_LONG = 74, + JVMTI_PRIMITIVE_TYPE_FLOAT = 70, + JVMTI_PRIMITIVE_TYPE_DOUBLE = 68 +} jvmtiPrimitiveType; + + /* Heap Object Filter Enumeration */ + +typedef enum { + JVMTI_HEAP_OBJECT_TAGGED = 1, + JVMTI_HEAP_OBJECT_UNTAGGED = 2, + JVMTI_HEAP_OBJECT_EITHER = 3 +} jvmtiHeapObjectFilter; + + /* Heap Root Kind Enumeration */ + +typedef enum { + JVMTI_HEAP_ROOT_JNI_GLOBAL = 1, + JVMTI_HEAP_ROOT_SYSTEM_CLASS = 2, + JVMTI_HEAP_ROOT_MONITOR = 3, + JVMTI_HEAP_ROOT_STACK_LOCAL = 4, + JVMTI_HEAP_ROOT_JNI_LOCAL = 5, + JVMTI_HEAP_ROOT_THREAD = 6, + JVMTI_HEAP_ROOT_OTHER = 7 +} jvmtiHeapRootKind; + + /* Object Reference Enumeration */ + +typedef enum { + JVMTI_REFERENCE_CLASS = 1, + JVMTI_REFERENCE_FIELD = 2, + JVMTI_REFERENCE_ARRAY_ELEMENT = 3, + JVMTI_REFERENCE_CLASS_LOADER = 4, + JVMTI_REFERENCE_SIGNERS = 5, + JVMTI_REFERENCE_PROTECTION_DOMAIN = 6, + JVMTI_REFERENCE_INTERFACE = 7, + JVMTI_REFERENCE_STATIC_FIELD = 8, + JVMTI_REFERENCE_CONSTANT_POOL = 9 +} jvmtiObjectReferenceKind; + + /* Iteration Control Enumeration */ + +typedef enum { + JVMTI_ITERATION_CONTINUE = 1, + JVMTI_ITERATION_IGNORE = 2, + JVMTI_ITERATION_ABORT = 0 +} 
jvmtiIterationControl; + + /* Class Status Flags */ + +enum { + JVMTI_CLASS_STATUS_VERIFIED = 1, + JVMTI_CLASS_STATUS_PREPARED = 2, + JVMTI_CLASS_STATUS_INITIALIZED = 4, + JVMTI_CLASS_STATUS_ERROR = 8, + JVMTI_CLASS_STATUS_ARRAY = 16, + JVMTI_CLASS_STATUS_PRIMITIVE = 32 +}; + + /* Event Enable/Disable */ + +typedef enum { + JVMTI_ENABLE = 1, + JVMTI_DISABLE = 0 +} jvmtiEventMode; + + /* Extension Function/Event Parameter Types */ + +typedef enum { + JVMTI_TYPE_JBYTE = 101, + JVMTI_TYPE_JCHAR = 102, + JVMTI_TYPE_JSHORT = 103, + JVMTI_TYPE_JINT = 104, + JVMTI_TYPE_JLONG = 105, + JVMTI_TYPE_JFLOAT = 106, + JVMTI_TYPE_JDOUBLE = 107, + JVMTI_TYPE_JBOOLEAN = 108, + JVMTI_TYPE_JOBJECT = 109, + JVMTI_TYPE_JTHREAD = 110, + JVMTI_TYPE_JCLASS = 111, + JVMTI_TYPE_JVALUE = 112, + JVMTI_TYPE_JFIELDID = 113, + JVMTI_TYPE_JMETHODID = 114, + JVMTI_TYPE_CCHAR = 115, + JVMTI_TYPE_CVOID = 116, + JVMTI_TYPE_JNIENV = 117 +} jvmtiParamTypes; + + /* Extension Function/Event Parameter Kinds */ + +typedef enum { + JVMTI_KIND_IN = 91, + JVMTI_KIND_IN_PTR = 92, + JVMTI_KIND_IN_BUF = 93, + JVMTI_KIND_ALLOC_BUF = 94, + JVMTI_KIND_ALLOC_ALLOC_BUF = 95, + JVMTI_KIND_OUT = 96, + JVMTI_KIND_OUT_BUF = 97 +} jvmtiParamKind; + + /* Timer Kinds */ + +typedef enum { + JVMTI_TIMER_USER_CPU = 30, + JVMTI_TIMER_TOTAL_CPU = 31, + JVMTI_TIMER_ELAPSED = 32 +} jvmtiTimerKind; + + /* Phases of execution */ + +typedef enum { + JVMTI_PHASE_ONLOAD = 1, + JVMTI_PHASE_PRIMORDIAL = 2, + JVMTI_PHASE_START = 6, + JVMTI_PHASE_LIVE = 4, + JVMTI_PHASE_DEAD = 8 +} jvmtiPhase; + + /* Version Interface Types */ + +enum { + JVMTI_VERSION_INTERFACE_JNI = 0x00000000, + JVMTI_VERSION_INTERFACE_JVMTI = 0x30000000 +}; + + /* Version Masks */ + +enum { + JVMTI_VERSION_MASK_INTERFACE_TYPE = 0x70000000, + JVMTI_VERSION_MASK_MAJOR = 0x0FFF0000, + JVMTI_VERSION_MASK_MINOR = 0x0000FF00, + JVMTI_VERSION_MASK_MICRO = 0x000000FF +}; + + /* Version Shifts */ + +enum { + JVMTI_VERSION_SHIFT_MAJOR = 16, + JVMTI_VERSION_SHIFT_MINOR = 8, + JVMTI_VERSION_SHIFT_MICRO = 0 +}; + + /* Verbose Flag Enumeration */ + +typedef enum { + JVMTI_VERBOSE_OTHER = 0, + JVMTI_VERBOSE_GC = 1, + JVMTI_VERBOSE_CLASS = 2, + JVMTI_VERBOSE_JNI = 4 +} jvmtiVerboseFlag; + + /* JLocation Format Enumeration */ + +typedef enum { + JVMTI_JLOCATION_JVMBCI = 1, + JVMTI_JLOCATION_MACHINEPC = 2, + JVMTI_JLOCATION_OTHER = 0 +} jvmtiJlocationFormat; + + /* Resource Exhaustion Flags */ + +enum { + JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR = 0x0001, + JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP = 0x0002, + JVMTI_RESOURCE_EXHAUSTED_THREADS = 0x0004 +}; + + /* Errors */ + +typedef enum { + JVMTI_ERROR_NONE = 0, + JVMTI_ERROR_INVALID_THREAD = 10, + JVMTI_ERROR_INVALID_THREAD_GROUP = 11, + JVMTI_ERROR_INVALID_PRIORITY = 12, + JVMTI_ERROR_THREAD_NOT_SUSPENDED = 13, + JVMTI_ERROR_THREAD_SUSPENDED = 14, + JVMTI_ERROR_THREAD_NOT_ALIVE = 15, + JVMTI_ERROR_INVALID_OBJECT = 20, + JVMTI_ERROR_INVALID_CLASS = 21, + JVMTI_ERROR_CLASS_NOT_PREPARED = 22, + JVMTI_ERROR_INVALID_METHODID = 23, + JVMTI_ERROR_INVALID_LOCATION = 24, + JVMTI_ERROR_INVALID_FIELDID = 25, + JVMTI_ERROR_NO_MORE_FRAMES = 31, + JVMTI_ERROR_OPAQUE_FRAME = 32, + JVMTI_ERROR_TYPE_MISMATCH = 34, + JVMTI_ERROR_INVALID_SLOT = 35, + JVMTI_ERROR_DUPLICATE = 40, + JVMTI_ERROR_NOT_FOUND = 41, + JVMTI_ERROR_INVALID_MONITOR = 50, + JVMTI_ERROR_NOT_MONITOR_OWNER = 51, + JVMTI_ERROR_INTERRUPT = 52, + JVMTI_ERROR_INVALID_CLASS_FORMAT = 60, + JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION = 61, + JVMTI_ERROR_FAILS_VERIFICATION = 62, + JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED = 63, 
+ JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED = 64, + JVMTI_ERROR_INVALID_TYPESTATE = 65, + JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED = 66, + JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED = 67, + JVMTI_ERROR_UNSUPPORTED_VERSION = 68, + JVMTI_ERROR_NAMES_DONT_MATCH = 69, + JVMTI_ERROR_UNSUPPORTED_REDEFINITION_CLASS_MODIFIERS_CHANGED = 70, + JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED = 71, + JVMTI_ERROR_UNMODIFIABLE_CLASS = 79, + JVMTI_ERROR_NOT_AVAILABLE = 98, + JVMTI_ERROR_MUST_POSSESS_CAPABILITY = 99, + JVMTI_ERROR_NULL_POINTER = 100, + JVMTI_ERROR_ABSENT_INFORMATION = 101, + JVMTI_ERROR_INVALID_EVENT_TYPE = 102, + JVMTI_ERROR_ILLEGAL_ARGUMENT = 103, + JVMTI_ERROR_NATIVE_METHOD = 104, + JVMTI_ERROR_CLASS_LOADER_UNSUPPORTED = 106, + JVMTI_ERROR_OUT_OF_MEMORY = 110, + JVMTI_ERROR_ACCESS_DENIED = 111, + JVMTI_ERROR_WRONG_PHASE = 112, + JVMTI_ERROR_INTERNAL = 113, + JVMTI_ERROR_UNATTACHED_THREAD = 115, + JVMTI_ERROR_INVALID_ENVIRONMENT = 116, + JVMTI_ERROR_MAX = 116 +} jvmtiError; + + /* Event IDs */ + +typedef enum { + JVMTI_MIN_EVENT_TYPE_VAL = 50, + JVMTI_EVENT_VM_INIT = 50, + JVMTI_EVENT_VM_DEATH = 51, + JVMTI_EVENT_THREAD_START = 52, + JVMTI_EVENT_THREAD_END = 53, + JVMTI_EVENT_CLASS_FILE_LOAD_HOOK = 54, + JVMTI_EVENT_CLASS_LOAD = 55, + JVMTI_EVENT_CLASS_PREPARE = 56, + JVMTI_EVENT_VM_START = 57, + JVMTI_EVENT_EXCEPTION = 58, + JVMTI_EVENT_EXCEPTION_CATCH = 59, + JVMTI_EVENT_SINGLE_STEP = 60, + JVMTI_EVENT_FRAME_POP = 61, + JVMTI_EVENT_BREAKPOINT = 62, + JVMTI_EVENT_FIELD_ACCESS = 63, + JVMTI_EVENT_FIELD_MODIFICATION = 64, + JVMTI_EVENT_METHOD_ENTRY = 65, + JVMTI_EVENT_METHOD_EXIT = 66, + JVMTI_EVENT_NATIVE_METHOD_BIND = 67, + JVMTI_EVENT_COMPILED_METHOD_LOAD = 68, + JVMTI_EVENT_COMPILED_METHOD_UNLOAD = 69, + JVMTI_EVENT_DYNAMIC_CODE_GENERATED = 70, + JVMTI_EVENT_DATA_DUMP_REQUEST = 71, + JVMTI_EVENT_MONITOR_WAIT = 73, + JVMTI_EVENT_MONITOR_WAITED = 74, + JVMTI_EVENT_MONITOR_CONTENDED_ENTER = 75, + JVMTI_EVENT_MONITOR_CONTENDED_ENTERED = 76, + JVMTI_EVENT_RESOURCE_EXHAUSTED = 80, + JVMTI_EVENT_GARBAGE_COLLECTION_START = 81, + JVMTI_EVENT_GARBAGE_COLLECTION_FINISH = 82, + JVMTI_EVENT_OBJECT_FREE = 83, + JVMTI_EVENT_VM_OBJECT_ALLOC = 84, + JVMTI_MAX_EVENT_TYPE_VAL = 84 +} jvmtiEvent; + + + /* Pre-Declarations */ +struct _jvmtiThreadInfo; +typedef struct _jvmtiThreadInfo jvmtiThreadInfo; +struct _jvmtiMonitorStackDepthInfo; +typedef struct _jvmtiMonitorStackDepthInfo jvmtiMonitorStackDepthInfo; +struct _jvmtiThreadGroupInfo; +typedef struct _jvmtiThreadGroupInfo jvmtiThreadGroupInfo; +struct _jvmtiFrameInfo; +typedef struct _jvmtiFrameInfo jvmtiFrameInfo; +struct _jvmtiStackInfo; +typedef struct _jvmtiStackInfo jvmtiStackInfo; +struct _jvmtiHeapReferenceInfoField; +typedef struct _jvmtiHeapReferenceInfoField jvmtiHeapReferenceInfoField; +struct _jvmtiHeapReferenceInfoArray; +typedef struct _jvmtiHeapReferenceInfoArray jvmtiHeapReferenceInfoArray; +struct _jvmtiHeapReferenceInfoConstantPool; +typedef struct _jvmtiHeapReferenceInfoConstantPool jvmtiHeapReferenceInfoConstantPool; +struct _jvmtiHeapReferenceInfoStackLocal; +typedef struct _jvmtiHeapReferenceInfoStackLocal jvmtiHeapReferenceInfoStackLocal; +struct _jvmtiHeapReferenceInfoJniLocal; +typedef struct _jvmtiHeapReferenceInfoJniLocal jvmtiHeapReferenceInfoJniLocal; +struct _jvmtiHeapReferenceInfoReserved; +typedef struct _jvmtiHeapReferenceInfoReserved jvmtiHeapReferenceInfoReserved; +union _jvmtiHeapReferenceInfo; +typedef union _jvmtiHeapReferenceInfo jvmtiHeapReferenceInfo; +struct 
_jvmtiHeapCallbacks; +typedef struct _jvmtiHeapCallbacks jvmtiHeapCallbacks; +struct _jvmtiClassDefinition; +typedef struct _jvmtiClassDefinition jvmtiClassDefinition; +struct _jvmtiMonitorUsage; +typedef struct _jvmtiMonitorUsage jvmtiMonitorUsage; +struct _jvmtiLineNumberEntry; +typedef struct _jvmtiLineNumberEntry jvmtiLineNumberEntry; +struct _jvmtiLocalVariableEntry; +typedef struct _jvmtiLocalVariableEntry jvmtiLocalVariableEntry; +struct _jvmtiParamInfo; +typedef struct _jvmtiParamInfo jvmtiParamInfo; +struct _jvmtiExtensionFunctionInfo; +typedef struct _jvmtiExtensionFunctionInfo jvmtiExtensionFunctionInfo; +struct _jvmtiExtensionEventInfo; +typedef struct _jvmtiExtensionEventInfo jvmtiExtensionEventInfo; +struct _jvmtiTimerInfo; +typedef struct _jvmtiTimerInfo jvmtiTimerInfo; +struct _jvmtiAddrLocationMap; +typedef struct _jvmtiAddrLocationMap jvmtiAddrLocationMap; + + /* Function Types */ + +typedef void (JNICALL *jvmtiStartFunction) + (jvmtiEnv* jvmti_env, JNIEnv* jni_env, void* arg); + +typedef jint (JNICALL *jvmtiHeapIterationCallback) + (jlong class_tag, jlong size, jlong* tag_ptr, jint length, void* user_data); + +typedef jint (JNICALL *jvmtiHeapReferenceCallback) + (jvmtiHeapReferenceKind reference_kind, const jvmtiHeapReferenceInfo* reference_info, jlong class_tag, jlong referrer_class_tag, jlong size, jlong* tag_ptr, jlong* referrer_tag_ptr, jint length, void* user_data); + +typedef jint (JNICALL *jvmtiPrimitiveFieldCallback) + (jvmtiHeapReferenceKind kind, const jvmtiHeapReferenceInfo* info, jlong object_class_tag, jlong* object_tag_ptr, jvalue value, jvmtiPrimitiveType value_type, void* user_data); + +typedef jint (JNICALL *jvmtiArrayPrimitiveValueCallback) + (jlong class_tag, jlong size, jlong* tag_ptr, jint element_count, jvmtiPrimitiveType element_type, const void* elements, void* user_data); + +typedef jint (JNICALL *jvmtiStringPrimitiveValueCallback) + (jlong class_tag, jlong size, jlong* tag_ptr, const jchar* value, jint value_length, void* user_data); + +typedef jint (JNICALL *jvmtiReservedCallback) + (); + +typedef jvmtiIterationControl (JNICALL *jvmtiHeapObjectCallback) + (jlong class_tag, jlong size, jlong* tag_ptr, void* user_data); + +typedef jvmtiIterationControl (JNICALL *jvmtiHeapRootCallback) + (jvmtiHeapRootKind root_kind, jlong class_tag, jlong size, jlong* tag_ptr, void* user_data); + +typedef jvmtiIterationControl (JNICALL *jvmtiStackReferenceCallback) + (jvmtiHeapRootKind root_kind, jlong class_tag, jlong size, jlong* tag_ptr, jlong thread_tag, jint depth, jmethodID method, jint slot, void* user_data); + +typedef jvmtiIterationControl (JNICALL *jvmtiObjectReferenceCallback) + (jvmtiObjectReferenceKind reference_kind, jlong class_tag, jlong size, jlong* tag_ptr, jlong referrer_tag, jint referrer_index, void* user_data); + +typedef jvmtiError (JNICALL *jvmtiExtensionFunction) + (jvmtiEnv* jvmti_env, ...); + +typedef void (JNICALL *jvmtiExtensionEvent) + (jvmtiEnv* jvmti_env, ...); + + + /* Structure Types */ +struct _jvmtiThreadInfo { + char* name; + jint priority; + jboolean is_daemon; + jthreadGroup thread_group; + jobject context_class_loader; +}; +struct _jvmtiMonitorStackDepthInfo { + jobject monitor; + jint stack_depth; +}; +struct _jvmtiThreadGroupInfo { + jthreadGroup parent; + char* name; + jint max_priority; + jboolean is_daemon; +}; +struct _jvmtiFrameInfo { + jmethodID method; + jlocation location; +}; +struct _jvmtiStackInfo { + jthread thread; + jint state; + jvmtiFrameInfo* frame_buffer; + jint frame_count; +}; +struct 
_jvmtiHeapReferenceInfoField { + jint index; +}; +struct _jvmtiHeapReferenceInfoArray { + jint index; +}; +struct _jvmtiHeapReferenceInfoConstantPool { + jint index; +}; +struct _jvmtiHeapReferenceInfoStackLocal { + jlong thread_tag; + jlong thread_id; + jint depth; + jmethodID method; + jlocation location; + jint slot; +}; +struct _jvmtiHeapReferenceInfoJniLocal { + jlong thread_tag; + jlong thread_id; + jint depth; + jmethodID method; +}; +struct _jvmtiHeapReferenceInfoReserved { + jlong reserved1; + jlong reserved2; + jlong reserved3; + jlong reserved4; + jlong reserved5; + jlong reserved6; + jlong reserved7; + jlong reserved8; +}; +union _jvmtiHeapReferenceInfo { + jvmtiHeapReferenceInfoField field; + jvmtiHeapReferenceInfoArray array; + jvmtiHeapReferenceInfoConstantPool constant_pool; + jvmtiHeapReferenceInfoStackLocal stack_local; + jvmtiHeapReferenceInfoJniLocal jni_local; + jvmtiHeapReferenceInfoReserved other; +}; +struct _jvmtiHeapCallbacks { + jvmtiHeapIterationCallback heap_iteration_callback; + jvmtiHeapReferenceCallback heap_reference_callback; + jvmtiPrimitiveFieldCallback primitive_field_callback; + jvmtiArrayPrimitiveValueCallback array_primitive_value_callback; + jvmtiStringPrimitiveValueCallback string_primitive_value_callback; + jvmtiReservedCallback reserved5; + jvmtiReservedCallback reserved6; + jvmtiReservedCallback reserved7; + jvmtiReservedCallback reserved8; + jvmtiReservedCallback reserved9; + jvmtiReservedCallback reserved10; + jvmtiReservedCallback reserved11; + jvmtiReservedCallback reserved12; + jvmtiReservedCallback reserved13; + jvmtiReservedCallback reserved14; + jvmtiReservedCallback reserved15; +}; +struct _jvmtiClassDefinition { + jclass klass; + jint class_byte_count; + const unsigned char* class_bytes; +}; +struct _jvmtiMonitorUsage { + jthread owner; + jint entry_count; + jint waiter_count; + jthread* waiters; + jint notify_waiter_count; + jthread* notify_waiters; +}; +struct _jvmtiLineNumberEntry { + jlocation start_location; + jint line_number; +}; +struct _jvmtiLocalVariableEntry { + jlocation start_location; + jint length; + char* name; + char* signature; + char* generic_signature; + jint slot; +}; +struct _jvmtiParamInfo { + char* name; + jvmtiParamKind kind; + jvmtiParamTypes base_type; + jboolean null_ok; +}; +struct _jvmtiExtensionFunctionInfo { + jvmtiExtensionFunction func; + char* id; + char* short_description; + jint param_count; + jvmtiParamInfo* params; + jint error_count; + jvmtiError* errors; +}; +struct _jvmtiExtensionEventInfo { + jint extension_event_index; + char* id; + char* short_description; + jint param_count; + jvmtiParamInfo* params; +}; +struct _jvmtiTimerInfo { + jlong max_value; + jboolean may_skip_forward; + jboolean may_skip_backward; + jvmtiTimerKind kind; + jlong reserved1; + jlong reserved2; +}; +struct _jvmtiAddrLocationMap { + const void* start_address; + jlocation location; +}; + +typedef struct { + unsigned int can_tag_objects : 1; + unsigned int can_generate_field_modification_events : 1; + unsigned int can_generate_field_access_events : 1; + unsigned int can_get_bytecodes : 1; + unsigned int can_get_synthetic_attribute : 1; + unsigned int can_get_owned_monitor_info : 1; + unsigned int can_get_current_contended_monitor : 1; + unsigned int can_get_monitor_info : 1; + unsigned int can_pop_frame : 1; + unsigned int can_redefine_classes : 1; + unsigned int can_signal_thread : 1; + unsigned int can_get_source_file_name : 1; + unsigned int can_get_line_numbers : 1; + unsigned int can_get_source_debug_extension : 1; + 
unsigned int can_access_local_variables : 1; + unsigned int can_maintain_original_method_order : 1; + unsigned int can_generate_single_step_events : 1; + unsigned int can_generate_exception_events : 1; + unsigned int can_generate_frame_pop_events : 1; + unsigned int can_generate_breakpoint_events : 1; + unsigned int can_suspend : 1; + unsigned int can_redefine_any_class : 1; + unsigned int can_get_current_thread_cpu_time : 1; + unsigned int can_get_thread_cpu_time : 1; + unsigned int can_generate_method_entry_events : 1; + unsigned int can_generate_method_exit_events : 1; + unsigned int can_generate_all_class_hook_events : 1; + unsigned int can_generate_compiled_method_load_events : 1; + unsigned int can_generate_monitor_events : 1; + unsigned int can_generate_vm_object_alloc_events : 1; + unsigned int can_generate_native_method_bind_events : 1; + unsigned int can_generate_garbage_collection_events : 1; + unsigned int can_generate_object_free_events : 1; + unsigned int can_force_early_return : 1; + unsigned int can_get_owned_monitor_stack_depth_info : 1; + unsigned int can_get_constant_pool : 1; + unsigned int can_set_native_method_prefix : 1; + unsigned int can_retransform_classes : 1; + unsigned int can_retransform_any_class : 1; + unsigned int can_generate_resource_exhaustion_heap_events : 1; + unsigned int can_generate_resource_exhaustion_threads_events : 1; + unsigned int : 7; + unsigned int : 16; + unsigned int : 16; + unsigned int : 16; + unsigned int : 16; + unsigned int : 16; +} jvmtiCapabilities; + + + /* Event Definitions */ + +typedef void (JNICALL *jvmtiEventReserved)(void); + + +typedef void (JNICALL *jvmtiEventBreakpoint) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jlocation location); + +typedef void (JNICALL *jvmtiEventClassFileLoadHook) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jclass class_being_redefined, + jobject loader, + const char* name, + jobject protection_domain, + jint class_data_len, + const unsigned char* class_data, + jint* new_class_data_len, + unsigned char** new_class_data); + +typedef void (JNICALL *jvmtiEventClassLoad) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jclass klass); + +typedef void (JNICALL *jvmtiEventClassPrepare) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jclass klass); + +typedef void (JNICALL *jvmtiEventCompiledMethodLoad) + (jvmtiEnv *jvmti_env, + jmethodID method, + jint code_size, + const void* code_addr, + jint map_length, + const jvmtiAddrLocationMap* map, + const void* compile_info); + +typedef void (JNICALL *jvmtiEventCompiledMethodUnload) + (jvmtiEnv *jvmti_env, + jmethodID method, + const void* code_addr); + +typedef void (JNICALL *jvmtiEventDataDumpRequest) + (jvmtiEnv *jvmti_env); + +typedef void (JNICALL *jvmtiEventDynamicCodeGenerated) + (jvmtiEnv *jvmti_env, + const char* name, + const void* address, + jint length); + +typedef void (JNICALL *jvmtiEventException) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jlocation location, + jobject exception, + jmethodID catch_method, + jlocation catch_location); + +typedef void (JNICALL *jvmtiEventExceptionCatch) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jlocation location, + jobject exception); + +typedef void (JNICALL *jvmtiEventFieldAccess) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jlocation location, + jclass field_klass, + jobject object, + jfieldID field); + +typedef void (JNICALL 
*jvmtiEventFieldModification) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jlocation location, + jclass field_klass, + jobject object, + jfieldID field, + char signature_type, + jvalue new_value); + +typedef void (JNICALL *jvmtiEventFramePop) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jboolean was_popped_by_exception); + +typedef void (JNICALL *jvmtiEventGarbageCollectionFinish) + (jvmtiEnv *jvmti_env); + +typedef void (JNICALL *jvmtiEventGarbageCollectionStart) + (jvmtiEnv *jvmti_env); + +typedef void (JNICALL *jvmtiEventMethodEntry) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method); + +typedef void (JNICALL *jvmtiEventMethodExit) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jboolean was_popped_by_exception, + jvalue return_value); + +typedef void (JNICALL *jvmtiEventMonitorContendedEnter) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jobject object); + +typedef void (JNICALL *jvmtiEventMonitorContendedEntered) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jobject object); + +typedef void (JNICALL *jvmtiEventMonitorWait) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jobject object, + jlong timeout); + +typedef void (JNICALL *jvmtiEventMonitorWaited) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jobject object, + jboolean timed_out); + +typedef void (JNICALL *jvmtiEventNativeMethodBind) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + void* address, + void** new_address_ptr); + +typedef void (JNICALL *jvmtiEventObjectFree) + (jvmtiEnv *jvmti_env, + jlong tag); + +typedef void (JNICALL *jvmtiEventResourceExhausted) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jint flags, + const void* reserved, + const char* description); + +typedef void (JNICALL *jvmtiEventSingleStep) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jmethodID method, + jlocation location); + +typedef void (JNICALL *jvmtiEventThreadEnd) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread); + +typedef void (JNICALL *jvmtiEventThreadStart) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread); + +typedef void (JNICALL *jvmtiEventVMDeath) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env); + +typedef void (JNICALL *jvmtiEventVMInit) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread); + +typedef void (JNICALL *jvmtiEventVMObjectAlloc) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env, + jthread thread, + jobject object, + jclass object_klass, + jlong size); + +typedef void (JNICALL *jvmtiEventVMStart) + (jvmtiEnv *jvmti_env, + JNIEnv* jni_env); + + /* Event Callback Structure */ + +typedef struct { + /* 50 : VM Initialization Event */ + jvmtiEventVMInit VMInit; + /* 51 : VM Death Event */ + jvmtiEventVMDeath VMDeath; + /* 52 : Thread Start */ + jvmtiEventThreadStart ThreadStart; + /* 53 : Thread End */ + jvmtiEventThreadEnd ThreadEnd; + /* 54 : Class File Load Hook */ + jvmtiEventClassFileLoadHook ClassFileLoadHook; + /* 55 : Class Load */ + jvmtiEventClassLoad ClassLoad; + /* 56 : Class Prepare */ + jvmtiEventClassPrepare ClassPrepare; + /* 57 : VM Start Event */ + jvmtiEventVMStart VMStart; + /* 58 : Exception */ + jvmtiEventException Exception; + /* 59 : Exception Catch */ + jvmtiEventExceptionCatch ExceptionCatch; + /* 60 : Single Step */ + jvmtiEventSingleStep SingleStep; + /* 61 : Frame Pop */ + jvmtiEventFramePop FramePop; + /* 62 : Breakpoint */ + 
jvmtiEventBreakpoint Breakpoint; + /* 63 : Field Access */ + jvmtiEventFieldAccess FieldAccess; + /* 64 : Field Modification */ + jvmtiEventFieldModification FieldModification; + /* 65 : Method Entry */ + jvmtiEventMethodEntry MethodEntry; + /* 66 : Method Exit */ + jvmtiEventMethodExit MethodExit; + /* 67 : Native Method Bind */ + jvmtiEventNativeMethodBind NativeMethodBind; + /* 68 : Compiled Method Load */ + jvmtiEventCompiledMethodLoad CompiledMethodLoad; + /* 69 : Compiled Method Unload */ + jvmtiEventCompiledMethodUnload CompiledMethodUnload; + /* 70 : Dynamic Code Generated */ + jvmtiEventDynamicCodeGenerated DynamicCodeGenerated; + /* 71 : Data Dump Request */ + jvmtiEventDataDumpRequest DataDumpRequest; + /* 72 */ + jvmtiEventReserved reserved72; + /* 73 : Monitor Wait */ + jvmtiEventMonitorWait MonitorWait; + /* 74 : Monitor Waited */ + jvmtiEventMonitorWaited MonitorWaited; + /* 75 : Monitor Contended Enter */ + jvmtiEventMonitorContendedEnter MonitorContendedEnter; + /* 76 : Monitor Contended Entered */ + jvmtiEventMonitorContendedEntered MonitorContendedEntered; + /* 77 */ + jvmtiEventReserved reserved77; + /* 78 */ + jvmtiEventReserved reserved78; + /* 79 */ + jvmtiEventReserved reserved79; + /* 80 : Resource Exhausted */ + jvmtiEventResourceExhausted ResourceExhausted; + /* 81 : Garbage Collection Start */ + jvmtiEventGarbageCollectionStart GarbageCollectionStart; + /* 82 : Garbage Collection Finish */ + jvmtiEventGarbageCollectionFinish GarbageCollectionFinish; + /* 83 : Object Free */ + jvmtiEventObjectFree ObjectFree; + /* 84 : VM Object Allocation */ + jvmtiEventVMObjectAlloc VMObjectAlloc; +} jvmtiEventCallbacks; + + + /* Function Interface */ + +typedef struct jvmtiInterface_1_ { + + /* 1 : RESERVED */ + void *reserved1; + + /* 2 : Set Event Notification Mode */ + jvmtiError (JNICALL *SetEventNotificationMode) (jvmtiEnv* env, + jvmtiEventMode mode, + jvmtiEvent event_type, + jthread event_thread, + ...); + + /* 3 : RESERVED */ + void *reserved3; + + /* 4 : Get All Threads */ + jvmtiError (JNICALL *GetAllThreads) (jvmtiEnv* env, + jint* threads_count_ptr, + jthread** threads_ptr); + + /* 5 : Suspend Thread */ + jvmtiError (JNICALL *SuspendThread) (jvmtiEnv* env, + jthread thread); + + /* 6 : Resume Thread */ + jvmtiError (JNICALL *ResumeThread) (jvmtiEnv* env, + jthread thread); + + /* 7 : Stop Thread */ + jvmtiError (JNICALL *StopThread) (jvmtiEnv* env, + jthread thread, + jobject exception); + + /* 8 : Interrupt Thread */ + jvmtiError (JNICALL *InterruptThread) (jvmtiEnv* env, + jthread thread); + + /* 9 : Get Thread Info */ + jvmtiError (JNICALL *GetThreadInfo) (jvmtiEnv* env, + jthread thread, + jvmtiThreadInfo* info_ptr); + + /* 10 : Get Owned Monitor Info */ + jvmtiError (JNICALL *GetOwnedMonitorInfo) (jvmtiEnv* env, + jthread thread, + jint* owned_monitor_count_ptr, + jobject** owned_monitors_ptr); + + /* 11 : Get Current Contended Monitor */ + jvmtiError (JNICALL *GetCurrentContendedMonitor) (jvmtiEnv* env, + jthread thread, + jobject* monitor_ptr); + + /* 12 : Run Agent Thread */ + jvmtiError (JNICALL *RunAgentThread) (jvmtiEnv* env, + jthread thread, + jvmtiStartFunction proc, + const void* arg, + jint priority); + + /* 13 : Get Top Thread Groups */ + jvmtiError (JNICALL *GetTopThreadGroups) (jvmtiEnv* env, + jint* group_count_ptr, + jthreadGroup** groups_ptr); + + /* 14 : Get Thread Group Info */ + jvmtiError (JNICALL *GetThreadGroupInfo) (jvmtiEnv* env, + jthreadGroup group, + jvmtiThreadGroupInfo* info_ptr); + + /* 15 : Get Thread Group Children */ + 
jvmtiError (JNICALL *GetThreadGroupChildren) (jvmtiEnv* env, + jthreadGroup group, + jint* thread_count_ptr, + jthread** threads_ptr, + jint* group_count_ptr, + jthreadGroup** groups_ptr); + + /* 16 : Get Frame Count */ + jvmtiError (JNICALL *GetFrameCount) (jvmtiEnv* env, + jthread thread, + jint* count_ptr); + + /* 17 : Get Thread State */ + jvmtiError (JNICALL *GetThreadState) (jvmtiEnv* env, + jthread thread, + jint* thread_state_ptr); + + /* 18 : Get Current Thread */ + jvmtiError (JNICALL *GetCurrentThread) (jvmtiEnv* env, + jthread* thread_ptr); + + /* 19 : Get Frame Location */ + jvmtiError (JNICALL *GetFrameLocation) (jvmtiEnv* env, + jthread thread, + jint depth, + jmethodID* method_ptr, + jlocation* location_ptr); + + /* 20 : Notify Frame Pop */ + jvmtiError (JNICALL *NotifyFramePop) (jvmtiEnv* env, + jthread thread, + jint depth); + + /* 21 : Get Local Variable - Object */ + jvmtiError (JNICALL *GetLocalObject) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jobject* value_ptr); + + /* 22 : Get Local Variable - Int */ + jvmtiError (JNICALL *GetLocalInt) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jint* value_ptr); + + /* 23 : Get Local Variable - Long */ + jvmtiError (JNICALL *GetLocalLong) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jlong* value_ptr); + + /* 24 : Get Local Variable - Float */ + jvmtiError (JNICALL *GetLocalFloat) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jfloat* value_ptr); + + /* 25 : Get Local Variable - Double */ + jvmtiError (JNICALL *GetLocalDouble) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jdouble* value_ptr); + + /* 26 : Set Local Variable - Object */ + jvmtiError (JNICALL *SetLocalObject) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jobject value); + + /* 27 : Set Local Variable - Int */ + jvmtiError (JNICALL *SetLocalInt) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jint value); + + /* 28 : Set Local Variable - Long */ + jvmtiError (JNICALL *SetLocalLong) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jlong value); + + /* 29 : Set Local Variable - Float */ + jvmtiError (JNICALL *SetLocalFloat) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jfloat value); + + /* 30 : Set Local Variable - Double */ + jvmtiError (JNICALL *SetLocalDouble) (jvmtiEnv* env, + jthread thread, + jint depth, + jint slot, + jdouble value); + + /* 31 : Create Raw Monitor */ + jvmtiError (JNICALL *CreateRawMonitor) (jvmtiEnv* env, + const char* name, + jrawMonitorID* monitor_ptr); + + /* 32 : Destroy Raw Monitor */ + jvmtiError (JNICALL *DestroyRawMonitor) (jvmtiEnv* env, + jrawMonitorID monitor); + + /* 33 : Raw Monitor Enter */ + jvmtiError (JNICALL *RawMonitorEnter) (jvmtiEnv* env, + jrawMonitorID monitor); + + /* 34 : Raw Monitor Exit */ + jvmtiError (JNICALL *RawMonitorExit) (jvmtiEnv* env, + jrawMonitorID monitor); + + /* 35 : Raw Monitor Wait */ + jvmtiError (JNICALL *RawMonitorWait) (jvmtiEnv* env, + jrawMonitorID monitor, + jlong millis); + + /* 36 : Raw Monitor Notify */ + jvmtiError (JNICALL *RawMonitorNotify) (jvmtiEnv* env, + jrawMonitorID monitor); + + /* 37 : Raw Monitor Notify All */ + jvmtiError (JNICALL *RawMonitorNotifyAll) (jvmtiEnv* env, + jrawMonitorID monitor); + + /* 38 : Set Breakpoint */ + jvmtiError (JNICALL *SetBreakpoint) (jvmtiEnv* env, + jmethodID method, + jlocation location); + + /* 39 : Clear Breakpoint */ + jvmtiError (JNICALL *ClearBreakpoint) (jvmtiEnv* env, + jmethodID method, + 
jlocation location); + + /* 40 : RESERVED */ + void *reserved40; + + /* 41 : Set Field Access Watch */ + jvmtiError (JNICALL *SetFieldAccessWatch) (jvmtiEnv* env, + jclass klass, + jfieldID field); + + /* 42 : Clear Field Access Watch */ + jvmtiError (JNICALL *ClearFieldAccessWatch) (jvmtiEnv* env, + jclass klass, + jfieldID field); + + /* 43 : Set Field Modification Watch */ + jvmtiError (JNICALL *SetFieldModificationWatch) (jvmtiEnv* env, + jclass klass, + jfieldID field); + + /* 44 : Clear Field Modification Watch */ + jvmtiError (JNICALL *ClearFieldModificationWatch) (jvmtiEnv* env, + jclass klass, + jfieldID field); + + /* 45 : Is Modifiable Class */ + jvmtiError (JNICALL *IsModifiableClass) (jvmtiEnv* env, + jclass klass, + jboolean* is_modifiable_class_ptr); + + /* 46 : Allocate */ + jvmtiError (JNICALL *Allocate) (jvmtiEnv* env, + jlong size, + unsigned char** mem_ptr); + + /* 47 : Deallocate */ + jvmtiError (JNICALL *Deallocate) (jvmtiEnv* env, + unsigned char* mem); + + /* 48 : Get Class Signature */ + jvmtiError (JNICALL *GetClassSignature) (jvmtiEnv* env, + jclass klass, + char** signature_ptr, + char** generic_ptr); + + /* 49 : Get Class Status */ + jvmtiError (JNICALL *GetClassStatus) (jvmtiEnv* env, + jclass klass, + jint* status_ptr); + + /* 50 : Get Source File Name */ + jvmtiError (JNICALL *GetSourceFileName) (jvmtiEnv* env, + jclass klass, + char** source_name_ptr); + + /* 51 : Get Class Modifiers */ + jvmtiError (JNICALL *GetClassModifiers) (jvmtiEnv* env, + jclass klass, + jint* modifiers_ptr); + + /* 52 : Get Class Methods */ + jvmtiError (JNICALL *GetClassMethods) (jvmtiEnv* env, + jclass klass, + jint* method_count_ptr, + jmethodID** methods_ptr); + + /* 53 : Get Class Fields */ + jvmtiError (JNICALL *GetClassFields) (jvmtiEnv* env, + jclass klass, + jint* field_count_ptr, + jfieldID** fields_ptr); + + /* 54 : Get Implemented Interfaces */ + jvmtiError (JNICALL *GetImplementedInterfaces) (jvmtiEnv* env, + jclass klass, + jint* interface_count_ptr, + jclass** interfaces_ptr); + + /* 55 : Is Interface */ + jvmtiError (JNICALL *IsInterface) (jvmtiEnv* env, + jclass klass, + jboolean* is_interface_ptr); + + /* 56 : Is Array Class */ + jvmtiError (JNICALL *IsArrayClass) (jvmtiEnv* env, + jclass klass, + jboolean* is_array_class_ptr); + + /* 57 : Get Class Loader */ + jvmtiError (JNICALL *GetClassLoader) (jvmtiEnv* env, + jclass klass, + jobject* classloader_ptr); + + /* 58 : Get Object Hash Code */ + jvmtiError (JNICALL *GetObjectHashCode) (jvmtiEnv* env, + jobject object, + jint* hash_code_ptr); + + /* 59 : Get Object Monitor Usage */ + jvmtiError (JNICALL *GetObjectMonitorUsage) (jvmtiEnv* env, + jobject object, + jvmtiMonitorUsage* info_ptr); + + /* 60 : Get Field Name (and Signature) */ + jvmtiError (JNICALL *GetFieldName) (jvmtiEnv* env, + jclass klass, + jfieldID field, + char** name_ptr, + char** signature_ptr, + char** generic_ptr); + + /* 61 : Get Field Declaring Class */ + jvmtiError (JNICALL *GetFieldDeclaringClass) (jvmtiEnv* env, + jclass klass, + jfieldID field, + jclass* declaring_class_ptr); + + /* 62 : Get Field Modifiers */ + jvmtiError (JNICALL *GetFieldModifiers) (jvmtiEnv* env, + jclass klass, + jfieldID field, + jint* modifiers_ptr); + + /* 63 : Is Field Synthetic */ + jvmtiError (JNICALL *IsFieldSynthetic) (jvmtiEnv* env, + jclass klass, + jfieldID field, + jboolean* is_synthetic_ptr); + + /* 64 : Get Method Name (and Signature) */ + jvmtiError (JNICALL *GetMethodName) (jvmtiEnv* env, + jmethodID method, + char** name_ptr, + char** signature_ptr, + 
char** generic_ptr); + + /* 65 : Get Method Declaring Class */ + jvmtiError (JNICALL *GetMethodDeclaringClass) (jvmtiEnv* env, + jmethodID method, + jclass* declaring_class_ptr); + + /* 66 : Get Method Modifiers */ + jvmtiError (JNICALL *GetMethodModifiers) (jvmtiEnv* env, + jmethodID method, + jint* modifiers_ptr); + + /* 67 : RESERVED */ + void *reserved67; + + /* 68 : Get Max Locals */ + jvmtiError (JNICALL *GetMaxLocals) (jvmtiEnv* env, + jmethodID method, + jint* max_ptr); + + /* 69 : Get Arguments Size */ + jvmtiError (JNICALL *GetArgumentsSize) (jvmtiEnv* env, + jmethodID method, + jint* size_ptr); + + /* 70 : Get Line Number Table */ + jvmtiError (JNICALL *GetLineNumberTable) (jvmtiEnv* env, + jmethodID method, + jint* entry_count_ptr, + jvmtiLineNumberEntry** table_ptr); + + /* 71 : Get Method Location */ + jvmtiError (JNICALL *GetMethodLocation) (jvmtiEnv* env, + jmethodID method, + jlocation* start_location_ptr, + jlocation* end_location_ptr); + + /* 72 : Get Local Variable Table */ + jvmtiError (JNICALL *GetLocalVariableTable) (jvmtiEnv* env, + jmethodID method, + jint* entry_count_ptr, + jvmtiLocalVariableEntry** table_ptr); + + /* 73 : Set Native Method Prefix */ + jvmtiError (JNICALL *SetNativeMethodPrefix) (jvmtiEnv* env, + const char* prefix); + + /* 74 : Set Native Method Prefixes */ + jvmtiError (JNICALL *SetNativeMethodPrefixes) (jvmtiEnv* env, + jint prefix_count, + char** prefixes); + + /* 75 : Get Bytecodes */ + jvmtiError (JNICALL *GetBytecodes) (jvmtiEnv* env, + jmethodID method, + jint* bytecode_count_ptr, + unsigned char** bytecodes_ptr); + + /* 76 : Is Method Native */ + jvmtiError (JNICALL *IsMethodNative) (jvmtiEnv* env, + jmethodID method, + jboolean* is_native_ptr); + + /* 77 : Is Method Synthetic */ + jvmtiError (JNICALL *IsMethodSynthetic) (jvmtiEnv* env, + jmethodID method, + jboolean* is_synthetic_ptr); + + /* 78 : Get Loaded Classes */ + jvmtiError (JNICALL *GetLoadedClasses) (jvmtiEnv* env, + jint* class_count_ptr, + jclass** classes_ptr); + + /* 79 : Get Classloader Classes */ + jvmtiError (JNICALL *GetClassLoaderClasses) (jvmtiEnv* env, + jobject initiating_loader, + jint* class_count_ptr, + jclass** classes_ptr); + + /* 80 : Pop Frame */ + jvmtiError (JNICALL *PopFrame) (jvmtiEnv* env, + jthread thread); + + /* 81 : Force Early Return - Object */ + jvmtiError (JNICALL *ForceEarlyReturnObject) (jvmtiEnv* env, + jthread thread, + jobject value); + + /* 82 : Force Early Return - Int */ + jvmtiError (JNICALL *ForceEarlyReturnInt) (jvmtiEnv* env, + jthread thread, + jint value); + + /* 83 : Force Early Return - Long */ + jvmtiError (JNICALL *ForceEarlyReturnLong) (jvmtiEnv* env, + jthread thread, + jlong value); + + /* 84 : Force Early Return - Float */ + jvmtiError (JNICALL *ForceEarlyReturnFloat) (jvmtiEnv* env, + jthread thread, + jfloat value); + + /* 85 : Force Early Return - Double */ + jvmtiError (JNICALL *ForceEarlyReturnDouble) (jvmtiEnv* env, + jthread thread, + jdouble value); + + /* 86 : Force Early Return - Void */ + jvmtiError (JNICALL *ForceEarlyReturnVoid) (jvmtiEnv* env, + jthread thread); + + /* 87 : Redefine Classes */ + jvmtiError (JNICALL *RedefineClasses) (jvmtiEnv* env, + jint class_count, + const jvmtiClassDefinition* class_definitions); + + /* 88 : Get Version Number */ + jvmtiError (JNICALL *GetVersionNumber) (jvmtiEnv* env, + jint* version_ptr); + + /* 89 : Get Capabilities */ + jvmtiError (JNICALL *GetCapabilities) (jvmtiEnv* env, + jvmtiCapabilities* capabilities_ptr); + + /* 90 : Get Source Debug Extension */ + jvmtiError 
(JNICALL *GetSourceDebugExtension) (jvmtiEnv* env, + jclass klass, + char** source_debug_extension_ptr); + + /* 91 : Is Method Obsolete */ + jvmtiError (JNICALL *IsMethodObsolete) (jvmtiEnv* env, + jmethodID method, + jboolean* is_obsolete_ptr); + + /* 92 : Suspend Thread List */ + jvmtiError (JNICALL *SuspendThreadList) (jvmtiEnv* env, + jint request_count, + const jthread* request_list, + jvmtiError* results); + + /* 93 : Resume Thread List */ + jvmtiError (JNICALL *ResumeThreadList) (jvmtiEnv* env, + jint request_count, + const jthread* request_list, + jvmtiError* results); + + /* 94 : RESERVED */ + void *reserved94; + + /* 95 : RESERVED */ + void *reserved95; + + /* 96 : RESERVED */ + void *reserved96; + + /* 97 : RESERVED */ + void *reserved97; + + /* 98 : RESERVED */ + void *reserved98; + + /* 99 : RESERVED */ + void *reserved99; + + /* 100 : Get All Stack Traces */ + jvmtiError (JNICALL *GetAllStackTraces) (jvmtiEnv* env, + jint max_frame_count, + jvmtiStackInfo** stack_info_ptr, + jint* thread_count_ptr); + + /* 101 : Get Thread List Stack Traces */ + jvmtiError (JNICALL *GetThreadListStackTraces) (jvmtiEnv* env, + jint thread_count, + const jthread* thread_list, + jint max_frame_count, + jvmtiStackInfo** stack_info_ptr); + + /* 102 : Get Thread Local Storage */ + jvmtiError (JNICALL *GetThreadLocalStorage) (jvmtiEnv* env, + jthread thread, + void** data_ptr); + + /* 103 : Set Thread Local Storage */ + jvmtiError (JNICALL *SetThreadLocalStorage) (jvmtiEnv* env, + jthread thread, + const void* data); + + /* 104 : Get Stack Trace */ + jvmtiError (JNICALL *GetStackTrace) (jvmtiEnv* env, + jthread thread, + jint start_depth, + jint max_frame_count, + jvmtiFrameInfo* frame_buffer, + jint* count_ptr); + + /* 105 : RESERVED */ + void *reserved105; + + /* 106 : Get Tag */ + jvmtiError (JNICALL *GetTag) (jvmtiEnv* env, + jobject object, + jlong* tag_ptr); + + /* 107 : Set Tag */ + jvmtiError (JNICALL *SetTag) (jvmtiEnv* env, + jobject object, + jlong tag); + + /* 108 : Force Garbage Collection */ + jvmtiError (JNICALL *ForceGarbageCollection) (jvmtiEnv* env); + + /* 109 : Iterate Over Objects Reachable From Object */ + jvmtiError (JNICALL *IterateOverObjectsReachableFromObject) (jvmtiEnv* env, + jobject object, + jvmtiObjectReferenceCallback object_reference_callback, + const void* user_data); + + /* 110 : Iterate Over Reachable Objects */ + jvmtiError (JNICALL *IterateOverReachableObjects) (jvmtiEnv* env, + jvmtiHeapRootCallback heap_root_callback, + jvmtiStackReferenceCallback stack_ref_callback, + jvmtiObjectReferenceCallback object_ref_callback, + const void* user_data); + + /* 111 : Iterate Over Heap */ + jvmtiError (JNICALL *IterateOverHeap) (jvmtiEnv* env, + jvmtiHeapObjectFilter object_filter, + jvmtiHeapObjectCallback heap_object_callback, + const void* user_data); + + /* 112 : Iterate Over Instances Of Class */ + jvmtiError (JNICALL *IterateOverInstancesOfClass) (jvmtiEnv* env, + jclass klass, + jvmtiHeapObjectFilter object_filter, + jvmtiHeapObjectCallback heap_object_callback, + const void* user_data); + + /* 113 : RESERVED */ + void *reserved113; + + /* 114 : Get Objects With Tags */ + jvmtiError (JNICALL *GetObjectsWithTags) (jvmtiEnv* env, + jint tag_count, + const jlong* tags, + jint* count_ptr, + jobject** object_result_ptr, + jlong** tag_result_ptr); + + /* 115 : Follow References */ + jvmtiError (JNICALL *FollowReferences) (jvmtiEnv* env, + jint heap_filter, + jclass klass, + jobject initial_object, + const jvmtiHeapCallbacks* callbacks, + const void* user_data); + + /* 
116 : Iterate Through Heap */ + jvmtiError (JNICALL *IterateThroughHeap) (jvmtiEnv* env, + jint heap_filter, + jclass klass, + const jvmtiHeapCallbacks* callbacks, + const void* user_data); + + /* 117 : RESERVED */ + void *reserved117; + + /* 118 : RESERVED */ + void *reserved118; + + /* 119 : RESERVED */ + void *reserved119; + + /* 120 : Set JNI Function Table */ + jvmtiError (JNICALL *SetJNIFunctionTable) (jvmtiEnv* env, + const jniNativeInterface* function_table); + + /* 121 : Get JNI Function Table */ + jvmtiError (JNICALL *GetJNIFunctionTable) (jvmtiEnv* env, + jniNativeInterface** function_table); + + /* 122 : Set Event Callbacks */ + jvmtiError (JNICALL *SetEventCallbacks) (jvmtiEnv* env, + const jvmtiEventCallbacks* callbacks, + jint size_of_callbacks); + + /* 123 : Generate Events */ + jvmtiError (JNICALL *GenerateEvents) (jvmtiEnv* env, + jvmtiEvent event_type); + + /* 124 : Get Extension Functions */ + jvmtiError (JNICALL *GetExtensionFunctions) (jvmtiEnv* env, + jint* extension_count_ptr, + jvmtiExtensionFunctionInfo** extensions); + + /* 125 : Get Extension Events */ + jvmtiError (JNICALL *GetExtensionEvents) (jvmtiEnv* env, + jint* extension_count_ptr, + jvmtiExtensionEventInfo** extensions); + + /* 126 : Set Extension Event Callback */ + jvmtiError (JNICALL *SetExtensionEventCallback) (jvmtiEnv* env, + jint extension_event_index, + jvmtiExtensionEvent callback); + + /* 127 : Dispose Environment */ + jvmtiError (JNICALL *DisposeEnvironment) (jvmtiEnv* env); + + /* 128 : Get Error Name */ + jvmtiError (JNICALL *GetErrorName) (jvmtiEnv* env, + jvmtiError error, + char** name_ptr); + + /* 129 : Get JLocation Format */ + jvmtiError (JNICALL *GetJLocationFormat) (jvmtiEnv* env, + jvmtiJlocationFormat* format_ptr); + + /* 130 : Get System Properties */ + jvmtiError (JNICALL *GetSystemProperties) (jvmtiEnv* env, + jint* count_ptr, + char*** property_ptr); + + /* 131 : Get System Property */ + jvmtiError (JNICALL *GetSystemProperty) (jvmtiEnv* env, + const char* property, + char** value_ptr); + + /* 132 : Set System Property */ + jvmtiError (JNICALL *SetSystemProperty) (jvmtiEnv* env, + const char* property, + const char* value); + + /* 133 : Get Phase */ + jvmtiError (JNICALL *GetPhase) (jvmtiEnv* env, + jvmtiPhase* phase_ptr); + + /* 134 : Get Current Thread CPU Timer Information */ + jvmtiError (JNICALL *GetCurrentThreadCpuTimerInfo) (jvmtiEnv* env, + jvmtiTimerInfo* info_ptr); + + /* 135 : Get Current Thread CPU Time */ + jvmtiError (JNICALL *GetCurrentThreadCpuTime) (jvmtiEnv* env, + jlong* nanos_ptr); + + /* 136 : Get Thread CPU Timer Information */ + jvmtiError (JNICALL *GetThreadCpuTimerInfo) (jvmtiEnv* env, + jvmtiTimerInfo* info_ptr); + + /* 137 : Get Thread CPU Time */ + jvmtiError (JNICALL *GetThreadCpuTime) (jvmtiEnv* env, + jthread thread, + jlong* nanos_ptr); + + /* 138 : Get Timer Information */ + jvmtiError (JNICALL *GetTimerInfo) (jvmtiEnv* env, + jvmtiTimerInfo* info_ptr); + + /* 139 : Get Time */ + jvmtiError (JNICALL *GetTime) (jvmtiEnv* env, + jlong* nanos_ptr); + + /* 140 : Get Potential Capabilities */ + jvmtiError (JNICALL *GetPotentialCapabilities) (jvmtiEnv* env, + jvmtiCapabilities* capabilities_ptr); + + /* 141 : RESERVED */ + void *reserved141; + + /* 142 : Add Capabilities */ + jvmtiError (JNICALL *AddCapabilities) (jvmtiEnv* env, + const jvmtiCapabilities* capabilities_ptr); + + /* 143 : Relinquish Capabilities */ + jvmtiError (JNICALL *RelinquishCapabilities) (jvmtiEnv* env, + const jvmtiCapabilities* capabilities_ptr); + + /* 144 : Get Available 
Processors */ + jvmtiError (JNICALL *GetAvailableProcessors) (jvmtiEnv* env, + jint* processor_count_ptr); + + /* 145 : Get Class Version Numbers */ + jvmtiError (JNICALL *GetClassVersionNumbers) (jvmtiEnv* env, + jclass klass, + jint* minor_version_ptr, + jint* major_version_ptr); + + /* 146 : Get Constant Pool */ + jvmtiError (JNICALL *GetConstantPool) (jvmtiEnv* env, + jclass klass, + jint* constant_pool_count_ptr, + jint* constant_pool_byte_count_ptr, + unsigned char** constant_pool_bytes_ptr); + + /* 147 : Get Environment Local Storage */ + jvmtiError (JNICALL *GetEnvironmentLocalStorage) (jvmtiEnv* env, + void** data_ptr); + + /* 148 : Set Environment Local Storage */ + jvmtiError (JNICALL *SetEnvironmentLocalStorage) (jvmtiEnv* env, + const void* data); + + /* 149 : Add To Bootstrap Class Loader Search */ + jvmtiError (JNICALL *AddToBootstrapClassLoaderSearch) (jvmtiEnv* env, + const char* segment); + + /* 150 : Set Verbose Flag */ + jvmtiError (JNICALL *SetVerboseFlag) (jvmtiEnv* env, + jvmtiVerboseFlag flag, + jboolean value); + + /* 151 : Add To System Class Loader Search */ + jvmtiError (JNICALL *AddToSystemClassLoaderSearch) (jvmtiEnv* env, + const char* segment); + + /* 152 : Retransform Classes */ + jvmtiError (JNICALL *RetransformClasses) (jvmtiEnv* env, + jint class_count, + const jclass* classes); + + /* 153 : Get Owned Monitor Stack Depth Info */ + jvmtiError (JNICALL *GetOwnedMonitorStackDepthInfo) (jvmtiEnv* env, + jthread thread, + jint* monitor_info_count_ptr, + jvmtiMonitorStackDepthInfo** monitor_info_ptr); + + /* 154 : Get Object Size */ + jvmtiError (JNICALL *GetObjectSize) (jvmtiEnv* env, + jobject object, + jlong* size_ptr); + +} jvmtiInterface_1; + +struct _jvmtiEnv { + const struct jvmtiInterface_1_ *functions; +#ifdef __cplusplus + + + jvmtiError Allocate(jlong size, + unsigned char** mem_ptr) { + return functions->Allocate(this, size, mem_ptr); + } + + jvmtiError Deallocate(unsigned char* mem) { + return functions->Deallocate(this, mem); + } + + jvmtiError GetThreadState(jthread thread, + jint* thread_state_ptr) { + return functions->GetThreadState(this, thread, thread_state_ptr); + } + + jvmtiError GetCurrentThread(jthread* thread_ptr) { + return functions->GetCurrentThread(this, thread_ptr); + } + + jvmtiError GetAllThreads(jint* threads_count_ptr, + jthread** threads_ptr) { + return functions->GetAllThreads(this, threads_count_ptr, threads_ptr); + } + + jvmtiError SuspendThread(jthread thread) { + return functions->SuspendThread(this, thread); + } + + jvmtiError SuspendThreadList(jint request_count, + const jthread* request_list, + jvmtiError* results) { + return functions->SuspendThreadList(this, request_count, request_list, results); + } + + jvmtiError ResumeThread(jthread thread) { + return functions->ResumeThread(this, thread); + } + + jvmtiError ResumeThreadList(jint request_count, + const jthread* request_list, + jvmtiError* results) { + return functions->ResumeThreadList(this, request_count, request_list, results); + } + + jvmtiError StopThread(jthread thread, + jobject exception) { + return functions->StopThread(this, thread, exception); + } + + jvmtiError InterruptThread(jthread thread) { + return functions->InterruptThread(this, thread); + } + + jvmtiError GetThreadInfo(jthread thread, + jvmtiThreadInfo* info_ptr) { + return functions->GetThreadInfo(this, thread, info_ptr); + } + + jvmtiError GetOwnedMonitorInfo(jthread thread, + jint* owned_monitor_count_ptr, + jobject** owned_monitors_ptr) { + return functions->GetOwnedMonitorInfo(this, 
thread, owned_monitor_count_ptr, owned_monitors_ptr); + } + + jvmtiError GetOwnedMonitorStackDepthInfo(jthread thread, + jint* monitor_info_count_ptr, + jvmtiMonitorStackDepthInfo** monitor_info_ptr) { + return functions->GetOwnedMonitorStackDepthInfo(this, thread, monitor_info_count_ptr, monitor_info_ptr); + } + + jvmtiError GetCurrentContendedMonitor(jthread thread, + jobject* monitor_ptr) { + return functions->GetCurrentContendedMonitor(this, thread, monitor_ptr); + } + + jvmtiError RunAgentThread(jthread thread, + jvmtiStartFunction proc, + const void* arg, + jint priority) { + return functions->RunAgentThread(this, thread, proc, arg, priority); + } + + jvmtiError SetThreadLocalStorage(jthread thread, + const void* data) { + return functions->SetThreadLocalStorage(this, thread, data); + } + + jvmtiError GetThreadLocalStorage(jthread thread, + void** data_ptr) { + return functions->GetThreadLocalStorage(this, thread, data_ptr); + } + + jvmtiError GetTopThreadGroups(jint* group_count_ptr, + jthreadGroup** groups_ptr) { + return functions->GetTopThreadGroups(this, group_count_ptr, groups_ptr); + } + + jvmtiError GetThreadGroupInfo(jthreadGroup group, + jvmtiThreadGroupInfo* info_ptr) { + return functions->GetThreadGroupInfo(this, group, info_ptr); + } + + jvmtiError GetThreadGroupChildren(jthreadGroup group, + jint* thread_count_ptr, + jthread** threads_ptr, + jint* group_count_ptr, + jthreadGroup** groups_ptr) { + return functions->GetThreadGroupChildren(this, group, thread_count_ptr, threads_ptr, group_count_ptr, groups_ptr); + } + + jvmtiError GetStackTrace(jthread thread, + jint start_depth, + jint max_frame_count, + jvmtiFrameInfo* frame_buffer, + jint* count_ptr) { + return functions->GetStackTrace(this, thread, start_depth, max_frame_count, frame_buffer, count_ptr); + } + + jvmtiError GetAllStackTraces(jint max_frame_count, + jvmtiStackInfo** stack_info_ptr, + jint* thread_count_ptr) { + return functions->GetAllStackTraces(this, max_frame_count, stack_info_ptr, thread_count_ptr); + } + + jvmtiError GetThreadListStackTraces(jint thread_count, + const jthread* thread_list, + jint max_frame_count, + jvmtiStackInfo** stack_info_ptr) { + return functions->GetThreadListStackTraces(this, thread_count, thread_list, max_frame_count, stack_info_ptr); + } + + jvmtiError GetFrameCount(jthread thread, + jint* count_ptr) { + return functions->GetFrameCount(this, thread, count_ptr); + } + + jvmtiError PopFrame(jthread thread) { + return functions->PopFrame(this, thread); + } + + jvmtiError GetFrameLocation(jthread thread, + jint depth, + jmethodID* method_ptr, + jlocation* location_ptr) { + return functions->GetFrameLocation(this, thread, depth, method_ptr, location_ptr); + } + + jvmtiError NotifyFramePop(jthread thread, + jint depth) { + return functions->NotifyFramePop(this, thread, depth); + } + + jvmtiError ForceEarlyReturnObject(jthread thread, + jobject value) { + return functions->ForceEarlyReturnObject(this, thread, value); + } + + jvmtiError ForceEarlyReturnInt(jthread thread, + jint value) { + return functions->ForceEarlyReturnInt(this, thread, value); + } + + jvmtiError ForceEarlyReturnLong(jthread thread, + jlong value) { + return functions->ForceEarlyReturnLong(this, thread, value); + } + + jvmtiError ForceEarlyReturnFloat(jthread thread, + jfloat value) { + return functions->ForceEarlyReturnFloat(this, thread, value); + } + + jvmtiError ForceEarlyReturnDouble(jthread thread, + jdouble value) { + return functions->ForceEarlyReturnDouble(this, thread, value); + } + + jvmtiError 
ForceEarlyReturnVoid(jthread thread) { + return functions->ForceEarlyReturnVoid(this, thread); + } + + jvmtiError FollowReferences(jint heap_filter, + jclass klass, + jobject initial_object, + const jvmtiHeapCallbacks* callbacks, + const void* user_data) { + return functions->FollowReferences(this, heap_filter, klass, initial_object, callbacks, user_data); + } + + jvmtiError IterateThroughHeap(jint heap_filter, + jclass klass, + const jvmtiHeapCallbacks* callbacks, + const void* user_data) { + return functions->IterateThroughHeap(this, heap_filter, klass, callbacks, user_data); + } + + jvmtiError GetTag(jobject object, + jlong* tag_ptr) { + return functions->GetTag(this, object, tag_ptr); + } + + jvmtiError SetTag(jobject object, + jlong tag) { + return functions->SetTag(this, object, tag); + } + + jvmtiError GetObjectsWithTags(jint tag_count, + const jlong* tags, + jint* count_ptr, + jobject** object_result_ptr, + jlong** tag_result_ptr) { + return functions->GetObjectsWithTags(this, tag_count, tags, count_ptr, object_result_ptr, tag_result_ptr); + } + + jvmtiError ForceGarbageCollection() { + return functions->ForceGarbageCollection(this); + } + + jvmtiError IterateOverObjectsReachableFromObject(jobject object, + jvmtiObjectReferenceCallback object_reference_callback, + const void* user_data) { + return functions->IterateOverObjectsReachableFromObject(this, object, object_reference_callback, user_data); + } + + jvmtiError IterateOverReachableObjects(jvmtiHeapRootCallback heap_root_callback, + jvmtiStackReferenceCallback stack_ref_callback, + jvmtiObjectReferenceCallback object_ref_callback, + const void* user_data) { + return functions->IterateOverReachableObjects(this, heap_root_callback, stack_ref_callback, object_ref_callback, user_data); + } + + jvmtiError IterateOverHeap(jvmtiHeapObjectFilter object_filter, + jvmtiHeapObjectCallback heap_object_callback, + const void* user_data) { + return functions->IterateOverHeap(this, object_filter, heap_object_callback, user_data); + } + + jvmtiError IterateOverInstancesOfClass(jclass klass, + jvmtiHeapObjectFilter object_filter, + jvmtiHeapObjectCallback heap_object_callback, + const void* user_data) { + return functions->IterateOverInstancesOfClass(this, klass, object_filter, heap_object_callback, user_data); + } + + jvmtiError GetLocalObject(jthread thread, + jint depth, + jint slot, + jobject* value_ptr) { + return functions->GetLocalObject(this, thread, depth, slot, value_ptr); + } + + jvmtiError GetLocalInt(jthread thread, + jint depth, + jint slot, + jint* value_ptr) { + return functions->GetLocalInt(this, thread, depth, slot, value_ptr); + } + + jvmtiError GetLocalLong(jthread thread, + jint depth, + jint slot, + jlong* value_ptr) { + return functions->GetLocalLong(this, thread, depth, slot, value_ptr); + } + + jvmtiError GetLocalFloat(jthread thread, + jint depth, + jint slot, + jfloat* value_ptr) { + return functions->GetLocalFloat(this, thread, depth, slot, value_ptr); + } + + jvmtiError GetLocalDouble(jthread thread, + jint depth, + jint slot, + jdouble* value_ptr) { + return functions->GetLocalDouble(this, thread, depth, slot, value_ptr); + } + + jvmtiError SetLocalObject(jthread thread, + jint depth, + jint slot, + jobject value) { + return functions->SetLocalObject(this, thread, depth, slot, value); + } + + jvmtiError SetLocalInt(jthread thread, + jint depth, + jint slot, + jint value) { + return functions->SetLocalInt(this, thread, depth, slot, value); + } + + jvmtiError SetLocalLong(jthread thread, + jint depth, + jint slot, + 
jlong value) { + return functions->SetLocalLong(this, thread, depth, slot, value); + } + + jvmtiError SetLocalFloat(jthread thread, + jint depth, + jint slot, + jfloat value) { + return functions->SetLocalFloat(this, thread, depth, slot, value); + } + + jvmtiError SetLocalDouble(jthread thread, + jint depth, + jint slot, + jdouble value) { + return functions->SetLocalDouble(this, thread, depth, slot, value); + } + + jvmtiError SetBreakpoint(jmethodID method, + jlocation location) { + return functions->SetBreakpoint(this, method, location); + } + + jvmtiError ClearBreakpoint(jmethodID method, + jlocation location) { + return functions->ClearBreakpoint(this, method, location); + } + + jvmtiError SetFieldAccessWatch(jclass klass, + jfieldID field) { + return functions->SetFieldAccessWatch(this, klass, field); + } + + jvmtiError ClearFieldAccessWatch(jclass klass, + jfieldID field) { + return functions->ClearFieldAccessWatch(this, klass, field); + } + + jvmtiError SetFieldModificationWatch(jclass klass, + jfieldID field) { + return functions->SetFieldModificationWatch(this, klass, field); + } + + jvmtiError ClearFieldModificationWatch(jclass klass, + jfieldID field) { + return functions->ClearFieldModificationWatch(this, klass, field); + } + + jvmtiError GetLoadedClasses(jint* class_count_ptr, + jclass** classes_ptr) { + return functions->GetLoadedClasses(this, class_count_ptr, classes_ptr); + } + + jvmtiError GetClassLoaderClasses(jobject initiating_loader, + jint* class_count_ptr, + jclass** classes_ptr) { + return functions->GetClassLoaderClasses(this, initiating_loader, class_count_ptr, classes_ptr); + } + + jvmtiError GetClassSignature(jclass klass, + char** signature_ptr, + char** generic_ptr) { + return functions->GetClassSignature(this, klass, signature_ptr, generic_ptr); + } + + jvmtiError GetClassStatus(jclass klass, + jint* status_ptr) { + return functions->GetClassStatus(this, klass, status_ptr); + } + + jvmtiError GetSourceFileName(jclass klass, + char** source_name_ptr) { + return functions->GetSourceFileName(this, klass, source_name_ptr); + } + + jvmtiError GetClassModifiers(jclass klass, + jint* modifiers_ptr) { + return functions->GetClassModifiers(this, klass, modifiers_ptr); + } + + jvmtiError GetClassMethods(jclass klass, + jint* method_count_ptr, + jmethodID** methods_ptr) { + return functions->GetClassMethods(this, klass, method_count_ptr, methods_ptr); + } + + jvmtiError GetClassFields(jclass klass, + jint* field_count_ptr, + jfieldID** fields_ptr) { + return functions->GetClassFields(this, klass, field_count_ptr, fields_ptr); + } + + jvmtiError GetImplementedInterfaces(jclass klass, + jint* interface_count_ptr, + jclass** interfaces_ptr) { + return functions->GetImplementedInterfaces(this, klass, interface_count_ptr, interfaces_ptr); + } + + jvmtiError GetClassVersionNumbers(jclass klass, + jint* minor_version_ptr, + jint* major_version_ptr) { + return functions->GetClassVersionNumbers(this, klass, minor_version_ptr, major_version_ptr); + } + + jvmtiError GetConstantPool(jclass klass, + jint* constant_pool_count_ptr, + jint* constant_pool_byte_count_ptr, + unsigned char** constant_pool_bytes_ptr) { + return functions->GetConstantPool(this, klass, constant_pool_count_ptr, constant_pool_byte_count_ptr, constant_pool_bytes_ptr); + } + + jvmtiError IsInterface(jclass klass, + jboolean* is_interface_ptr) { + return functions->IsInterface(this, klass, is_interface_ptr); + } + + jvmtiError IsArrayClass(jclass klass, + jboolean* is_array_class_ptr) { + return 
functions->IsArrayClass(this, klass, is_array_class_ptr); + } + + jvmtiError IsModifiableClass(jclass klass, + jboolean* is_modifiable_class_ptr) { + return functions->IsModifiableClass(this, klass, is_modifiable_class_ptr); + } + + jvmtiError GetClassLoader(jclass klass, + jobject* classloader_ptr) { + return functions->GetClassLoader(this, klass, classloader_ptr); + } + + jvmtiError GetSourceDebugExtension(jclass klass, + char** source_debug_extension_ptr) { + return functions->GetSourceDebugExtension(this, klass, source_debug_extension_ptr); + } + + jvmtiError RetransformClasses(jint class_count, + const jclass* classes) { + return functions->RetransformClasses(this, class_count, classes); + } + + jvmtiError RedefineClasses(jint class_count, + const jvmtiClassDefinition* class_definitions) { + return functions->RedefineClasses(this, class_count, class_definitions); + } + + jvmtiError GetObjectSize(jobject object, + jlong* size_ptr) { + return functions->GetObjectSize(this, object, size_ptr); + } + + jvmtiError GetObjectHashCode(jobject object, + jint* hash_code_ptr) { + return functions->GetObjectHashCode(this, object, hash_code_ptr); + } + + jvmtiError GetObjectMonitorUsage(jobject object, + jvmtiMonitorUsage* info_ptr) { + return functions->GetObjectMonitorUsage(this, object, info_ptr); + } + + jvmtiError GetFieldName(jclass klass, + jfieldID field, + char** name_ptr, + char** signature_ptr, + char** generic_ptr) { + return functions->GetFieldName(this, klass, field, name_ptr, signature_ptr, generic_ptr); + } + + jvmtiError GetFieldDeclaringClass(jclass klass, + jfieldID field, + jclass* declaring_class_ptr) { + return functions->GetFieldDeclaringClass(this, klass, field, declaring_class_ptr); + } + + jvmtiError GetFieldModifiers(jclass klass, + jfieldID field, + jint* modifiers_ptr) { + return functions->GetFieldModifiers(this, klass, field, modifiers_ptr); + } + + jvmtiError IsFieldSynthetic(jclass klass, + jfieldID field, + jboolean* is_synthetic_ptr) { + return functions->IsFieldSynthetic(this, klass, field, is_synthetic_ptr); + } + + jvmtiError GetMethodName(jmethodID method, + char** name_ptr, + char** signature_ptr, + char** generic_ptr) { + return functions->GetMethodName(this, method, name_ptr, signature_ptr, generic_ptr); + } + + jvmtiError GetMethodDeclaringClass(jmethodID method, + jclass* declaring_class_ptr) { + return functions->GetMethodDeclaringClass(this, method, declaring_class_ptr); + } + + jvmtiError GetMethodModifiers(jmethodID method, + jint* modifiers_ptr) { + return functions->GetMethodModifiers(this, method, modifiers_ptr); + } + + jvmtiError GetMaxLocals(jmethodID method, + jint* max_ptr) { + return functions->GetMaxLocals(this, method, max_ptr); + } + + jvmtiError GetArgumentsSize(jmethodID method, + jint* size_ptr) { + return functions->GetArgumentsSize(this, method, size_ptr); + } + + jvmtiError GetLineNumberTable(jmethodID method, + jint* entry_count_ptr, + jvmtiLineNumberEntry** table_ptr) { + return functions->GetLineNumberTable(this, method, entry_count_ptr, table_ptr); + } + + jvmtiError GetMethodLocation(jmethodID method, + jlocation* start_location_ptr, + jlocation* end_location_ptr) { + return functions->GetMethodLocation(this, method, start_location_ptr, end_location_ptr); + } + + jvmtiError GetLocalVariableTable(jmethodID method, + jint* entry_count_ptr, + jvmtiLocalVariableEntry** table_ptr) { + return functions->GetLocalVariableTable(this, method, entry_count_ptr, table_ptr); + } + + jvmtiError GetBytecodes(jmethodID method, + jint* 
bytecode_count_ptr, + unsigned char** bytecodes_ptr) { + return functions->GetBytecodes(this, method, bytecode_count_ptr, bytecodes_ptr); + } + + jvmtiError IsMethodNative(jmethodID method, + jboolean* is_native_ptr) { + return functions->IsMethodNative(this, method, is_native_ptr); + } + + jvmtiError IsMethodSynthetic(jmethodID method, + jboolean* is_synthetic_ptr) { + return functions->IsMethodSynthetic(this, method, is_synthetic_ptr); + } + + jvmtiError IsMethodObsolete(jmethodID method, + jboolean* is_obsolete_ptr) { + return functions->IsMethodObsolete(this, method, is_obsolete_ptr); + } + + jvmtiError SetNativeMethodPrefix(const char* prefix) { + return functions->SetNativeMethodPrefix(this, prefix); + } + + jvmtiError SetNativeMethodPrefixes(jint prefix_count, + char** prefixes) { + return functions->SetNativeMethodPrefixes(this, prefix_count, prefixes); + } + + jvmtiError CreateRawMonitor(const char* name, + jrawMonitorID* monitor_ptr) { + return functions->CreateRawMonitor(this, name, monitor_ptr); + } + + jvmtiError DestroyRawMonitor(jrawMonitorID monitor) { + return functions->DestroyRawMonitor(this, monitor); + } + + jvmtiError RawMonitorEnter(jrawMonitorID monitor) { + return functions->RawMonitorEnter(this, monitor); + } + + jvmtiError RawMonitorExit(jrawMonitorID monitor) { + return functions->RawMonitorExit(this, monitor); + } + + jvmtiError RawMonitorWait(jrawMonitorID monitor, + jlong millis) { + return functions->RawMonitorWait(this, monitor, millis); + } + + jvmtiError RawMonitorNotify(jrawMonitorID monitor) { + return functions->RawMonitorNotify(this, monitor); + } + + jvmtiError RawMonitorNotifyAll(jrawMonitorID monitor) { + return functions->RawMonitorNotifyAll(this, monitor); + } + + jvmtiError SetJNIFunctionTable(const jniNativeInterface* function_table) { + return functions->SetJNIFunctionTable(this, function_table); + } + + jvmtiError GetJNIFunctionTable(jniNativeInterface** function_table) { + return functions->GetJNIFunctionTable(this, function_table); + } + + jvmtiError SetEventCallbacks(const jvmtiEventCallbacks* callbacks, + jint size_of_callbacks) { + return functions->SetEventCallbacks(this, callbacks, size_of_callbacks); + } + + jvmtiError SetEventNotificationMode(jvmtiEventMode mode, + jvmtiEvent event_type, + jthread event_thread, + ...) 
{ + return functions->SetEventNotificationMode(this, mode, event_type, event_thread); + } + + jvmtiError GenerateEvents(jvmtiEvent event_type) { + return functions->GenerateEvents(this, event_type); + } + + jvmtiError GetExtensionFunctions(jint* extension_count_ptr, + jvmtiExtensionFunctionInfo** extensions) { + return functions->GetExtensionFunctions(this, extension_count_ptr, extensions); + } + + jvmtiError GetExtensionEvents(jint* extension_count_ptr, + jvmtiExtensionEventInfo** extensions) { + return functions->GetExtensionEvents(this, extension_count_ptr, extensions); + } + + jvmtiError SetExtensionEventCallback(jint extension_event_index, + jvmtiExtensionEvent callback) { + return functions->SetExtensionEventCallback(this, extension_event_index, callback); + } + + jvmtiError GetPotentialCapabilities(jvmtiCapabilities* capabilities_ptr) { + return functions->GetPotentialCapabilities(this, capabilities_ptr); + } + + jvmtiError AddCapabilities(const jvmtiCapabilities* capabilities_ptr) { + return functions->AddCapabilities(this, capabilities_ptr); + } + + jvmtiError RelinquishCapabilities(const jvmtiCapabilities* capabilities_ptr) { + return functions->RelinquishCapabilities(this, capabilities_ptr); + } + + jvmtiError GetCapabilities(jvmtiCapabilities* capabilities_ptr) { + return functions->GetCapabilities(this, capabilities_ptr); + } + + jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiTimerInfo* info_ptr) { + return functions->GetCurrentThreadCpuTimerInfo(this, info_ptr); + } + + jvmtiError GetCurrentThreadCpuTime(jlong* nanos_ptr) { + return functions->GetCurrentThreadCpuTime(this, nanos_ptr); + } + + jvmtiError GetThreadCpuTimerInfo(jvmtiTimerInfo* info_ptr) { + return functions->GetThreadCpuTimerInfo(this, info_ptr); + } + + jvmtiError GetThreadCpuTime(jthread thread, + jlong* nanos_ptr) { + return functions->GetThreadCpuTime(this, thread, nanos_ptr); + } + + jvmtiError GetTimerInfo(jvmtiTimerInfo* info_ptr) { + return functions->GetTimerInfo(this, info_ptr); + } + + jvmtiError GetTime(jlong* nanos_ptr) { + return functions->GetTime(this, nanos_ptr); + } + + jvmtiError GetAvailableProcessors(jint* processor_count_ptr) { + return functions->GetAvailableProcessors(this, processor_count_ptr); + } + + jvmtiError AddToBootstrapClassLoaderSearch(const char* segment) { + return functions->AddToBootstrapClassLoaderSearch(this, segment); + } + + jvmtiError AddToSystemClassLoaderSearch(const char* segment) { + return functions->AddToSystemClassLoaderSearch(this, segment); + } + + jvmtiError GetSystemProperties(jint* count_ptr, + char*** property_ptr) { + return functions->GetSystemProperties(this, count_ptr, property_ptr); + } + + jvmtiError GetSystemProperty(const char* property, + char** value_ptr) { + return functions->GetSystemProperty(this, property, value_ptr); + } + + jvmtiError SetSystemProperty(const char* property, + const char* value) { + return functions->SetSystemProperty(this, property, value); + } + + jvmtiError GetPhase(jvmtiPhase* phase_ptr) { + return functions->GetPhase(this, phase_ptr); + } + + jvmtiError DisposeEnvironment() { + return functions->DisposeEnvironment(this); + } + + jvmtiError SetEnvironmentLocalStorage(const void* data) { + return functions->SetEnvironmentLocalStorage(this, data); + } + + jvmtiError GetEnvironmentLocalStorage(void** data_ptr) { + return functions->GetEnvironmentLocalStorage(this, data_ptr); + } + + jvmtiError GetVersionNumber(jint* version_ptr) { + return functions->GetVersionNumber(this, version_ptr); + } + + jvmtiError 
GetErrorName(jvmtiError error, + char** name_ptr) { + return functions->GetErrorName(this, error, name_ptr); + } + + jvmtiError SetVerboseFlag(jvmtiVerboseFlag flag, + jboolean value) { + return functions->SetVerboseFlag(this, flag, value); + } + + jvmtiError GetJLocationFormat(jvmtiJlocationFormat* format_ptr) { + return functions->GetJLocationFormat(this, format_ptr); + } + +#endif /* __cplusplus */ +}; + + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* !_JAVA_JVMTI_H_ */ + diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c new file mode 100644 index 000000000000..784a07e2b258 --- /dev/null +++ b/src/client/src/TSDBJNIConnector.c @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdlib.h> +#include <string.h> + +#include "com_taosdata_jdbc_TSDBJNIConnector.h" +#include "taos.h" +#include "tlog.h" +#include "tsclient.h" +#include "tscUtil.h" + +int __init = 0; + +JavaVM *g_vm = NULL; + +jclass g_arrayListClass; +jmethodID g_arrayListConstructFp; +jmethodID g_arrayListAddFp; + +jclass g_metadataClass; +jmethodID g_metadataConstructFp; +jfieldID g_metadataColtypeField; +jfieldID g_metadataColnameField; +jfieldID g_metadataColsizeField; +jfieldID g_metadataColindexField; + +jclass g_rowdataClass; +jmethodID g_rowdataConstructor; +jmethodID g_rowdataClearFp; +jmethodID g_rowdataSetBooleanFp; +jmethodID g_rowdataSetByteFp; +jmethodID g_rowdataSetShortFp; +jmethodID g_rowdataSetIntFp; +jmethodID g_rowdataSetLongFp; +jmethodID g_rowdataSetFloatFp; +jmethodID g_rowdataSetDoubleFp; +jmethodID g_rowdataSetStringFp; +jmethodID g_rowdataSetTimestampFp; +jmethodID g_rowdataSetByteArrayFp; + +#define JNI_SUCCESS 0 +#define JNI_TDENGINE_ERROR -1 +#define JNI_CONNECTION_NULL -2 +#define JNI_RESULT_SET_NULL -3 +#define JNI_NUM_OF_FIELDS_0 -4 +#define JNI_SQL_NULL -5 +#define JNI_FETCH_END -6 +#define JNI_OUT_OF_MEMORY -7 + +void jniGetGlobalMethod(JNIEnv *env) { + // make sure init function executed once + if (__sync_val_compare_and_swap_32(&__init, 0, 1) == 1) { + return; + } + + if (g_vm == NULL) { + (*env)->GetJavaVM(env, &g_vm); + } + + jclass arrayListClass = (*env)->FindClass(env, "java/util/ArrayList"); + g_arrayListClass = (*env)->NewGlobalRef(env, arrayListClass); + g_arrayListConstructFp = (*env)->GetMethodID(env, g_arrayListClass, "<init>", "()V"); + g_arrayListAddFp = (*env)->GetMethodID(env, g_arrayListClass, "add", "(Ljava/lang/Object;)Z"); + (*env)->DeleteLocalRef(env, arrayListClass); + + jclass metadataClass = (*env)->FindClass(env, "com/taosdata/jdbc/ColumnMetaData"); + g_metadataClass = (*env)->NewGlobalRef(env, metadataClass); + g_metadataConstructFp = (*env)->GetMethodID(env, g_metadataClass, "<init>", "()V"); + g_metadataColtypeField = (*env)->GetFieldID(env, g_metadataClass, "colType", "I"); + g_metadataColnameField = (*env)->GetFieldID(env, g_metadataClass, "colName", "Ljava/lang/String;"); + g_metadataColsizeField = (*env)->GetFieldID(env, g_metadataClass, "colSize", "I"); + 
g_metadataColindexField = (*env)->GetFieldID(env, g_metadataClass, "colIndex", "I"); + (*env)->DeleteLocalRef(env, metadataClass); + + jclass rowdataClass = (*env)->FindClass(env, "com/taosdata/jdbc/TSDBResultSetRowData"); + g_rowdataClass = (*env)->NewGlobalRef(env, rowdataClass); + g_rowdataConstructor = (*env)->GetMethodID(env, g_rowdataClass, "<init>", "(I)V"); + g_rowdataClearFp = (*env)->GetMethodID(env, g_rowdataClass, "clear", "()V"); + g_rowdataSetBooleanFp = (*env)->GetMethodID(env, g_rowdataClass, "setBoolean", "(IZ)V"); + g_rowdataSetByteFp = (*env)->GetMethodID(env, g_rowdataClass, "setByte", "(IB)V"); + g_rowdataSetShortFp = (*env)->GetMethodID(env, g_rowdataClass, "setShort", "(IS)V"); + g_rowdataSetIntFp = (*env)->GetMethodID(env, g_rowdataClass, "setInt", "(II)V"); + g_rowdataSetLongFp = (*env)->GetMethodID(env, g_rowdataClass, "setLong", "(IJ)V"); + g_rowdataSetFloatFp = (*env)->GetMethodID(env, g_rowdataClass, "setFloat", "(IF)V"); + g_rowdataSetDoubleFp = (*env)->GetMethodID(env, g_rowdataClass, "setDouble", "(ID)V"); + g_rowdataSetStringFp = (*env)->GetMethodID(env, g_rowdataClass, "setString", "(ILjava/lang/String;)V"); + g_rowdataSetTimestampFp = (*env)->GetMethodID(env, g_rowdataClass, "setTimestamp", "(IJ)V"); + g_rowdataSetByteArrayFp = (*env)->GetMethodID(env, g_rowdataClass, "setByteArray", "(I[B)V"); + (*env)->DeleteLocalRef(env, rowdataClass); + + jniTrace("native method register finished"); +} + +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *env, jobject jobj, jstring jconfigDir) { + if (jconfigDir != NULL) { + const char *confDir = (*env)->GetStringUTFChars(env, jconfigDir, NULL); + if (confDir && strlen(confDir) != 0) { + strcpy(configDir, confDir); + } + (*env)->ReleaseStringUTFChars(env, jconfigDir, confDir); + } + + jniGetGlobalMethod(env); + jniTrace("jni initialized successfully, config directory: %s", configDir); +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv *env, jobject jobj, jint optionIndex, + jstring optionValue) { + if (optionValue == NULL) { + jniTrace("option index:%d value is null", optionIndex); + return 0; + } + + int res = 0; + + if (optionIndex == TSDB_OPTION_LOCALE) { + const char *locale = (*env)->GetStringUTFChars(env, optionValue, NULL); + if (locale && strlen(locale) != 0) { + res = taos_options(TSDB_OPTION_LOCALE, locale); + jniTrace("set locale to %s, result:%d", locale, res); + } else { + jniTrace("input locale is empty"); + } + (*env)->ReleaseStringUTFChars(env, optionValue, locale); + } else if (optionIndex == TSDB_OPTION_CHARSET) { + const char *charset = (*env)->GetStringUTFChars(env, optionValue, NULL); + if (charset && strlen(charset) != 0) { + res = taos_options(TSDB_OPTION_CHARSET, charset); + jniTrace("set character encoding to %s, result:%d", charset, res); + } else { + jniTrace("input character encoding is empty"); + } + (*env)->ReleaseStringUTFChars(env, optionValue, charset); + } else if (optionIndex == TSDB_OPTION_TIMEZONE) { + const char *timezone = (*env)->GetStringUTFChars(env, optionValue, NULL); + if (timezone && strlen(timezone) != 0) { + res = taos_options(TSDB_OPTION_TIMEZONE, timezone); + jniTrace("set timezone to %s, result:%d", timezone, res); + } else { + jniTrace("input timezone is empty"); + } + (*env)->ReleaseStringUTFChars(env, optionValue, timezone); + } else { + jniError("option index:%d is not found", optionIndex); + } + + return res; +} + +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEnv *env, 
jobject jobj, jstring jhost, + jint jport, jstring jdbName, jstring juser, + jstring jpass) { + jlong ret = 0; + const char *host = NULL; + const char *dbname = NULL; + const char *user = NULL; + const char *pass = NULL; + + if (jhost != NULL) { + host = (*env)->GetStringUTFChars(env, jhost, NULL); + } + if (jdbName != NULL) { + dbname = (*env)->GetStringUTFChars(env, jdbName, NULL); + } + if (juser != NULL) { + user = (*env)->GetStringUTFChars(env, juser, NULL); + } + if (jpass != NULL) { + pass = (*env)->GetStringUTFChars(env, jpass, NULL); + } + + if (user == NULL) { + jniTrace("jobj:%p, user is null, use tsDefaultUser", jobj); + user = tsDefaultUser; + } + if (pass == NULL) { + jniTrace("jobj:%p, pass is null, use tsDefaultPass", jobj); + pass = tsDefaultPass; + } + + /* + * set numOfThreadsPerCore = 0 + * means only one thread for client side scheduler + */ + tsNumOfThreadsPerCore = 0.0; + + ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, jport); + if (ret == 0) { + jniError("jobj:%p, taos:%p, connect to tdengine failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, + (char *)host, (char *)user, (char *)dbname, jport); + } else { + jniTrace("jobj:%p, taos:%p, connect to tdengine succeed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, + (char *)host, (char *)user, (char *)dbname, jport); + } + + if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host); + if (dbname != NULL) (*env)->ReleaseStringUTFChars(env, jdbName, dbname); + if (user != NULL && user != tsDefaultUser) (*env)->ReleaseStringUTFChars(env, juser, user); + if (pass != NULL && pass != tsDefaultPass) (*env)->ReleaseStringUTFChars(env, jpass, pass); + + return ret; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(JNIEnv *env, jobject jobj, + jbyteArray jsql, jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + if (jsql == NULL) { + jniError("jobj:%p, taos:%p, sql is null", jobj, tscon); + return JNI_SQL_NULL; + } + + jsize len = (*env)->GetArrayLength(env, jsql); + + char *dst = (char *)calloc(1, sizeof(char) * (len + 1)); + if (dst == NULL) { + return JNI_OUT_OF_MEMORY; + } + + (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst); + if ((*env)->ExceptionCheck(env)) { + //todo handle error + } + + int code = taos_query(tscon, dst); + if (code != 0) { + jniError("jobj:%p, taos:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst); + free(dst); + return JNI_TDENGINE_ERROR; + } else { + int32_t affectRows = 0; + SSqlObj *pSql = ((STscObj *)tscon)->pSql; + + if (pSql->cmd.command == TSDB_SQL_INSERT) { + affectRows = taos_affected_rows(tscon); + jniTrace("jobj:%p, taos:%p, code:%d, affect rows:%d, sql:%s", jobj, tscon, code, affectRows, dst); + } else { + jniTrace("jobj:%p, taos:%p, code:%d, sql:%s", jobj, tscon, code, dst); + } + + free(dst); + return affectRows; + } +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return (jint)-TSDB_CODE_INVALID_CONNECTION; + } + + return (jint)-taos_errno(tscon); +} + +JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong con) { + TAOS *tscon = (TAOS *)con; + return (*env)->NewStringUTF(env, (const char *)taos_errstr(tscon)); +} + 
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + int num_fields = taos_field_count(tscon); + if (num_fields != 0) { + jlong ret = (jlong)taos_use_result(tscon); + jniTrace("jobj:%p, taos:%p, get resultset:%p", jobj, tscon, (void *)ret); + return ret; + } + + jniError("jobj:%p, taos:%p, no resultset", jobj, tscon); + return 0; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con, + jlong res) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + if ((void *)res == NULL) { + jniError("jobj:%p, taos:%p, resultset is null", jobj, tscon); + return JNI_RESULT_SET_NULL; + } + + taos_free_result((void *)res); + jniTrace("jobj:%p, taos:%p, free resultset:%p", jobj, tscon, (void *)res); + return JNI_SUCCESS; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, + jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + jint ret = taos_affected_rows(tscon); + + jniTrace("jobj:%p, taos:%p, affect rows:%d", jobj, tscon, ret); + + return ret; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaDataImp(JNIEnv *env, jobject jobj, + jlong con, jlong res, + jobject arrayListObj) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + TAOS_RES *result = (TAOS_RES *)res; + if (result == NULL) { + jniError("jobj:%p, taos:%p, resultset is null", jobj, tscon); + return JNI_RESULT_SET_NULL; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + int num_fields = taos_num_fields(result); + + // jobject arrayListObj = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp, ""); + + if (num_fields == 0) { + jniError("jobj:%p, taos:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + return JNI_NUM_OF_FIELDS_0; + } else { + jniTrace("jobj:%p, taos:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + for (int i = 0; i < num_fields; ++i) { + jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp); + (*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type); + (*env)->SetIntField(env, metadataObj, g_metadataColsizeField, fields[i].bytes); + (*env)->SetIntField(env, metadataObj, g_metadataColindexField, i); + jstring metadataObjColname = (*env)->NewStringUTF(env, fields[i].name); + (*env)->SetObjectField(env, metadataObj, g_metadataColnameField, metadataObjColname); + (*env)->CallBooleanMethod(env, arrayListObj, g_arrayListAddFp, metadataObj); + } + } + + return JNI_SUCCESS; +} + +jbyteArray jniFromNCharToByteArray(JNIEnv *env, char *nchar) { + int len = (int)strlen(nchar); + jbyteArray bytes = (*env)->NewByteArray(env, len); + (*env)->SetByteArrayRegion(env, bytes, 0, len, (jbyte *)nchar); + return bytes; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEnv *env, jobject jobj, jlong con, + jlong res, jobject rowobj) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return 
JNI_CONNECTION_NULL; + } + + TAOS_RES *result = (TAOS_RES *)res; + if (result == NULL) { + jniError("jobj:%p, taos:%p, resultset is null", jobj, tscon); + return JNI_RESULT_SET_NULL; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + int num_fields = taos_num_fields(result); + + if (num_fields == 0) { + jniError("jobj:%p, taos:%p, resultset:%p, fields size is %d", jobj, tscon, res, num_fields); + return JNI_NUM_OF_FIELDS_0; + } + + TAOS_ROW row = taos_fetch_row(result); + if (row == NULL) { + jniTrace("jobj:%p, taos:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, res, num_fields); + return JNI_FETCH_END; + } + + for (int i = 0; i < num_fields; i++) { + if (row[i] == NULL) { + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_BOOL: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetBooleanFp, i, (jboolean)(*((char *)row[i]) == 1)); + break; + case TSDB_DATA_TYPE_TINYINT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((char *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((short *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int *)row[i]); + break; + case TSDB_DATA_TYPE_BIGINT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, (char *)row[i])); + break; + case TSDB_DATA_TYPE_NCHAR: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i, jniFromNCharToByteArray(env, (char *)row[i])); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i])); + break; + default: + break; + } + } + + return JNI_SUCCESS; +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionImp(JNIEnv *env, jobject jobj, + jlong con) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } else { + jniTrace("jobj:%p, taos:%p, close connection success", jobj, tscon); + taos_close(tscon); + return JNI_SUCCESS; + } +} + +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jstring jhost, + jstring juser, jstring jpass, jstring jdb, + jstring jtable, jlong jtime, + jint jperiod) { + TAOS_SUB *tsub; + jlong sub = 0; + char * host = NULL; + char * user = NULL; + char * pass = NULL; + char * db = NULL; + char * table = NULL; + int64_t time = 0; + int period = 0; + + jniGetGlobalMethod(env); + jniTrace("jobj:%p, in TSDBJNIConnector_subscribeImp", jobj); + + if (jhost != NULL) { + host = (char *)(*env)->GetStringUTFChars(env, jhost, NULL); + } + if (juser != NULL) { + user = (char *)(*env)->GetStringUTFChars(env, juser, NULL); + } + if (jpass != NULL) { + pass = (char *)(*env)->GetStringUTFChars(env, jpass, NULL); + } + if (jdb != NULL) { + db = (char *)(*env)->GetStringUTFChars(env, jdb, NULL); + } + if (jtable != NULL) { + table = (char *)(*env)->GetStringUTFChars(env, jtable, NULL); + } + time = (int64_t)jtime; + period = (int)jperiod; + + 
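+  // As in the connect path above, fall back to the configured defaults when the
+  // caller did not supply a user name or password.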
if (user == NULL) { + jniTrace("jobj:%p, user is null, use tsDefaultUser", jobj); + user = tsDefaultUser; + } + if (pass == NULL) { + jniTrace("jobj:%p, pass is null, use tsDefaultPass", jobj); + pass = tsDefaultPass; + } + + jniTrace("jobj:%p, host:%s, user:%s, pass:%s, db:%s, table:%s, time:%d, period:%d", jobj, host, user, pass, db, table, + time, period); + tsub = taos_subscribe(host, user, pass, db, table, time, period); + sub = (jlong)tsub; + + if (sub == 0) { + jniTrace("jobj:%p, failed to subscribe to db:%s, table:%s", jobj, db, table); + } else { + jniTrace("jobj:%p, successfully subscribe to db:%s, table:%s, sub:%ld, tsub:%p", jobj, db, table, sub, tsub); + } + + if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host); + if (user != NULL && user != tsDefaultUser) (*env)->ReleaseStringUTFChars(env, juser, user); + if (pass != NULL && pass != tsDefaultPass) (*env)->ReleaseStringUTFChars(env, jpass, pass); + if (db != NULL) (*env)->ReleaseStringUTFChars(env, jdb, db); + if (table != NULL) (*env)->ReleaseStringUTFChars(env, jtable, table); + + return sub; +} + +JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) { + jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); + + TAOS_SUB * tsub = (TAOS_SUB *)sub; + TAOS_ROW row = taos_consume(tsub); + TAOS_FIELD *fields = taos_fetch_subfields(tsub); + int num_fields = taos_subfields_count(tsub); + + jniGetGlobalMethod(env); + + jniTrace("jobj:%p, check fields:%p, num_fields=%d", jobj, fields, num_fields); + + jobject rowobj = (*env)->NewObject(env, g_rowdataClass, g_rowdataConstructor, num_fields); + jniTrace("created a rowdata object, rowobj:%p", rowobj); + + if (row == NULL) { + jniTrace("jobj:%p, tsub:%p, fields size is %d, fetch row to the end", jobj, tsub, num_fields); + return NULL; + } + + for (int i = 0; i < num_fields; i++) { + if (row[i] == NULL) { + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_BOOL: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetBooleanFp, i, (jboolean)(*((char *)row[i]) == 1)); + break; + case TSDB_DATA_TYPE_TINYINT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((char *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((short *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int *)row[i]); + break; + case TSDB_DATA_TYPE_BIGINT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, (char *)row[i])); + break; + case TSDB_DATA_TYPE_NCHAR: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i, jniFromNCharToByteArray(env, (char *)row[i])); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i])); + break; + default: + break; + } + } + jniTrace("jobj:%p, rowdata retrieved, rowobj:%p", jobj, rowobj); + return rowobj; +} + +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject 
jobj, jlong sub) { + TAOS_SUB *tsub = (TAOS_SUB *)sub; + taos_unsubscribe(tsub); +} + +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp(JNIEnv *env, jobject jobj, + jlong con, jbyteArray jsql) { + TAOS *tscon = (TAOS *)con; + if (tscon == NULL) { + jniError("jobj:%p, connection is closed", jobj); + return JNI_CONNECTION_NULL; + } + + if (jsql == NULL) { + jniError("jobj:%p, taos:%p, sql is null", jobj, tscon); + return JNI_SQL_NULL; + } + + jsize len = (*env)->GetArrayLength(env, jsql); + + char *dst = (char *)calloc(1, sizeof(char) * (len + 1)); + (*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst); + if ((*env)->ExceptionCheck(env)) { + //todo handle error + } + + int code = taos_validate_sql(tscon, dst); + jniTrace("jobj:%p, conn:%p, code is %d", jobj, tscon, code); + + return code; +} + +JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) { + return (*env)->NewStringUTF(env, (const char *)tsCharset); +} \ No newline at end of file diff --git a/src/client/src/tscAst.c b/src/client/src/tscAst.c new file mode 100644 index 000000000000..b3e11473d3ac --- /dev/null +++ b/src/client/src/tscAst.c @@ -0,0 +1,996 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tast.h" +#include "tlog.h" +#include "tscSyntaxtreefunction.h" +#include "tschemautil.h" +#include "tsdb.h" +#include "tskiplist.h" +#include "tsqlfunction.h" +#include "tutil.h" + +/* + * + * @date 2018-2-15 + * @version 0.2 operation for column filter + * @author liaohj + * + * @Description parse tag query expression to build ast + * ver 0.2, filter the result on first column with high priority to limit the candidate set + * ver 0.3, pipeline filter in the form of: (a+2)/9 > 14 + * + */ + +static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, SSQLToken *pToken); +static void tSQLSyntaxNodeDestroy(tSQLSyntaxNode *pNode); + +static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i); +static void destroySyntaxTree(tSQLSyntaxNode *); + +static void tSQLListTraversePrepare(tQueryInfo *colInfo, SSchema *pSchema, int32_t numOfCols, SSchema *pOneColSchema, + uint8_t optr, tVariant *val); + +static uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tSQLSyntaxNode *pLeft, + const tSQLSyntaxNode *pRight); + +/* + * Check the filter value type on the right hand side based on the column id on the left hand side, + * the filter value type must be identical to field type for relational operation + * As for binary arithmetic operation, it is not necessary to do so. 
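+ * For example, in a relational filter such as "tag > 5" the literal 5 is coerced to the
+ * type of the tag column, whereas in an arithmetic expression such as (a+2)/9 > 14 the
+ * constant operands are simply promoted to double before evaluation.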
+ */ +static void reviseBinaryExprIfNecessary(tSQLSyntaxNode **pLeft, tSQLSyntaxNode **pRight, uint8_t *optr) { + if (*optr >= TSDB_RELATION_LESS && *optr <= TSDB_RELATION_LIKE) { + // make sure that the type of data on both sides of relational comparision are identical + if ((*pLeft)->nodeType == TSQL_NODE_VALUE) { + tVariantTypeSetType((*pLeft)->pVal, (*pRight)->pSchema->type); + } else if ((*pRight)->nodeType == TSQL_NODE_VALUE) { + tVariantTypeSetType((*pRight)->pVal, (*pLeft)->pSchema->type); + } + + } else if (*optr >= TSDB_BINARY_OP_ADD && *optr <= TSDB_BINARY_OP_REMAINDER) { + if ((*pLeft)->nodeType == TSQL_NODE_VALUE) { + /* convert to int/bigint may cause the precision loss */ + tVariantTypeSetType((*pLeft)->pVal, TSDB_DATA_TYPE_DOUBLE); + } else if ((*pRight)->nodeType == TSQL_NODE_VALUE) { + /* convert to int/bigint may cause the precision loss */ + tVariantTypeSetType((*pRight)->pVal, TSDB_DATA_TYPE_DOUBLE); + } + } + + // switch left and left and right hand side in expr + if ((*pLeft)->nodeType == TSQL_NODE_VALUE && (*pRight)->nodeType == TSQL_NODE_COL) { + SWAP(*pLeft, *pRight); + + switch (*optr) { + case TSDB_RELATION_LARGE: + (*optr) = TSDB_RELATION_LESS; + break; + case TSDB_RELATION_LESS: + (*optr) = TSDB_RELATION_LARGE; + break; + case TSDB_RELATION_LARGE_EQUAL: + (*optr) = TSDB_RELATION_LESS_EQUAL; + break; + case TSDB_RELATION_LESS_EQUAL: + (*optr) = TSDB_RELATION_LARGE_EQUAL; + break; + default:; + // for other type of operations, do nothing + } + } +} + +static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, SSQLToken *pToken) { + /* if the token is not a value, return false */ + if (pToken->type == TK_RP || (pToken->type != TK_INTEGER && pToken->type != TK_FLOAT && pToken->type != TK_ID && + pToken->type != TK_TBNAME && pToken->type != TK_STRING && pToken->type != TK_BOOL)) { + return NULL; + } + + int32_t i = 0; + size_t nodeSize = sizeof(tSQLSyntaxNode); + tSQLSyntaxNode *pNode = NULL; + + if (pToken->type == TK_ID || pToken->type == TK_TBNAME) { + if (pToken->type == TK_ID) { + do { + size_t len = strlen(pSchema[i].name); + if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break; + } while (++i < numOfCols); + + if (i == numOfCols) { + // column name is not valid, parse the expression failed + return NULL; + } + } + + nodeSize += sizeof(SSchema); + + pNode = malloc(nodeSize); + pNode->pSchema = (struct SSchema *)((char *)pNode + sizeof(tSQLSyntaxNode)); + pNode->nodeType = TSQL_NODE_COL; + + if (pToken->type == TK_ID) { + pNode->colId = (int16_t)pSchema[i].colId; + memcpy(pNode->pSchema, &pSchema[i], sizeof(SSchema)); + } else { + pNode->colId = -1; + pNode->pSchema->type = TSDB_DATA_TYPE_BINARY; + pNode->pSchema->bytes = TSDB_METER_NAME_LEN; + strcpy(pNode->pSchema->name, TSQL_TBNAME_L); + pNode->pSchema->colId = -1; + } + + } else { + nodeSize += sizeof(tVariant); + pNode = malloc(nodeSize); + pNode->pVal = (tVariant *)((char *)pNode + sizeof(tSQLSyntaxNode)); + + toTSDBType(pToken->type); + tVariantCreate(pNode->pVal, pToken); + pNode->nodeType = TSQL_NODE_VALUE; + pNode->colId = -1; + } + + return pNode; +} + +static uint8_t getBinaryExprOptr(SSQLToken *pToken) { + switch (pToken->type) { + case TK_LT: + return TSDB_RELATION_LESS; + case TK_LE: + return TSDB_RELATION_LESS_EQUAL; + case TK_GT: + return TSDB_RELATION_LARGE; + case TK_GE: + return TSDB_RELATION_LARGE_EQUAL; + case TK_NE: + return TSDB_RELATION_NOT_EQUAL; + case TK_AND: + return TSDB_RELATION_AND; + case TK_OR: + return TSDB_RELATION_OR; + 
case TK_EQ: + return TSDB_RELATION_EQUAL; + case TK_PLUS: + return TSDB_BINARY_OP_ADD; + case TK_MINUS: + return TSDB_BINARY_OP_SUBTRACT; + case TK_STAR: + return TSDB_BINARY_OP_MULTIPLY; + case TK_SLASH: + return TSDB_BINARY_OP_DIVIDE; + case TK_REM: + return TSDB_BINARY_OP_REMAINDER; + case TK_LIKE: + return TSDB_RELATION_LIKE; + default: { return 0; } + } +} + +// previous generated expr is reduced as the left child +static tSQLSyntaxNode *parseRemainStr(char *pstr, tSQLBinaryExpr *pExpr, SSchema *pSchema, int32_t optr, + int32_t numOfCols, int32_t *i) { + // set the previous generated node as the left child of new root + tSQLSyntaxNode *pLeft = malloc(sizeof(tSQLSyntaxNode)); + pLeft->nodeType = TSQL_NODE_EXPR; + pLeft->pExpr = pExpr; + + // remain is the right child + tSQLSyntaxNode *pRight = createSyntaxTree(pSchema, numOfCols, pstr, i); + if (pRight == NULL || (pRight->nodeType == TSQL_NODE_COL && pLeft->nodeType != TSQL_NODE_VALUE) || + (pLeft->nodeType == TSQL_NODE_VALUE && pRight->nodeType != TSQL_NODE_COL)) { + tSQLSyntaxNodeDestroy(pLeft); + tSQLSyntaxNodeDestroy(pRight); + return NULL; + } + + tSQLBinaryExpr *pNewExpr = (tSQLBinaryExpr *)malloc(sizeof(tSQLBinaryExpr)); + uint8_t k = optr; + reviseBinaryExprIfNecessary(&pLeft, &pRight, &k); + pNewExpr->pLeft = pLeft; + pNewExpr->pRight = pRight; + pNewExpr->nSQLBinaryOptr = k; + + pNewExpr->filterOnPrimaryKey = isQueryOnPrimaryKey(pSchema[0].name, pLeft, pRight); + + tSQLSyntaxNode *pn = malloc(sizeof(tSQLSyntaxNode)); + pn->nodeType = TSQL_NODE_EXPR; + pn->pExpr = pNewExpr; + + return pn; +} + +uint8_t isQueryOnPrimaryKey(const char *primaryColumnName, const tSQLSyntaxNode *pLeft, const tSQLSyntaxNode *pRight) { + if (pLeft->nodeType == TSQL_NODE_COL) { + // if left node is the primary column,return true + return (strcmp(primaryColumnName, pLeft->pSchema->name) == 0) ? 1 : 0; + } else { + // if any children have query on primary key, their parents are also keep + // this value + return ((pLeft->nodeType == TSQL_NODE_EXPR && pLeft->pExpr->filterOnPrimaryKey == 1) || + (pRight->nodeType == TSQL_NODE_EXPR && pRight->pExpr->filterOnPrimaryKey == 1)) == true + ? 
1 + : 0; + } +} + +static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, char *str, int32_t *i) { + SSQLToken t0 = {0}; + + tStrGetToken(str, i, &t0, false); + if (t0.n == 0) { + return NULL; + } + + tSQLSyntaxNode *pLeft = NULL; + if (t0.type == TK_LP) { // start new left child branch + pLeft = createSyntaxTree(pSchema, numOfCols, str, i); + } else { + if (t0.type == TK_RP) { + return NULL; + } + pLeft = tSQLSyntaxNodeCreate(pSchema, numOfCols, &t0); + } + + if (pLeft == NULL) { + return NULL; + } + + tStrGetToken(str, i, &t0, false); + if (t0.n == 0 || t0.type == TK_RP) { + if (pLeft->nodeType != TSQL_NODE_EXPR) { + // if left is not the expr, it is not a legal expr + tSQLSyntaxNodeDestroy(pLeft); + return NULL; + } + + return pLeft; + } + + // get the operator of expr + uint8_t optr = getBinaryExprOptr(&t0); + if (optr <= 0) { + pError("not support binary operator:%d", t0.type); + tSQLSyntaxNodeDestroy(pLeft); + return NULL; + } + + assert(pLeft != NULL); + tSQLSyntaxNode *pRight = NULL; + + if (t0.type == TK_AND || t0.type == TK_OR || t0.type == TK_LP) { + pRight = createSyntaxTree(pSchema, numOfCols, str, i); + } else { + /* + * In case that pLeft is a field identification, + * we parse the value in expression according to queried field type, + * if we do not get the information, in case of value of field presented first, + * we revised the value after the binary expression is completed. + */ + tStrGetToken(str, i, &t0, true); + if (t0.n == 0) { + tSQLSyntaxNodeDestroy(pLeft); // illegal expression + return NULL; + } + + if (t0.type == TK_LP) { + pRight = createSyntaxTree(pSchema, numOfCols, str, i); + } else { + pRight = tSQLSyntaxNodeCreate(pSchema, numOfCols, &t0); + } + } + + if (pRight == NULL) { + tSQLSyntaxNodeDestroy(pLeft); + return NULL; + } + + /* create binary expr as the child of new parent node */ + tSQLBinaryExpr *pBinExpr = (tSQLBinaryExpr *)malloc(sizeof(tSQLBinaryExpr)); + reviseBinaryExprIfNecessary(&pLeft, &pRight, &optr); + + pBinExpr->filterOnPrimaryKey = isQueryOnPrimaryKey(pSchema[0].name, pLeft, pRight); + pBinExpr->pLeft = pLeft; + pBinExpr->pRight = pRight; + pBinExpr->nSQLBinaryOptr = optr; + + tStrGetToken(str, i, &t0, true); + + if (t0.n == 0 || t0.type == TK_RP) { + tSQLSyntaxNode *pn = malloc(sizeof(tSQLSyntaxNode)); + pn->nodeType = TSQL_NODE_EXPR; + pn->pExpr = pBinExpr; + pn->colId = -1; + return pn; + } else { + int32_t optr = getBinaryExprOptr(&t0); + if (optr <= 0) { + pError("not support binary operator:%d", t0.type); + return NULL; + } + + return parseRemainStr(str, pBinExpr, pSchema, optr, numOfCols, i); + } +} + +void tSQLBinaryExprFromString(tSQLBinaryExpr **pExpr, SSchema *pSchema, int32_t numOfCols, char *src, int32_t len) { + *pExpr = NULL; + if (len <= 0 || src == NULL || pSchema == NULL || numOfCols <= 0) { + return; + } + + int32_t pos = 0; + tSQLSyntaxNode *pStxNode = createSyntaxTree(pSchema, numOfCols, src, &pos); + if (pStxNode != NULL) { + assert(pStxNode->nodeType == TSQL_NODE_EXPR); + *pExpr = pStxNode->pExpr; + free(pStxNode); + } +} + +int32_t tSQLBinaryExprToStringImpl(tSQLSyntaxNode *pNode, char *dst, uint8_t type) { + int32_t len = 0; + if (type == TSQL_NODE_EXPR) { + *dst = '('; + tSQLBinaryExprToString(pNode->pExpr, dst + 1, &len); + len += 2; + *(dst + len - 1) = ')'; + } else if (type == TSQL_NODE_COL) { + len = sprintf(dst, "%s", pNode->pSchema->name); + } else { + len = tVariantToString(pNode->pVal, dst); + } + return len; +} + +static char *tSQLOptrToString(uint8_t optr, char *dst) { + switch 
(optr) { + case TSDB_RELATION_LESS: { + *dst = '<'; + dst += 1; + break; + } + case TSDB_RELATION_LESS_EQUAL: { + *dst = '<'; + *(dst + 1) = '='; + dst += 2; + break; + } + case TSDB_RELATION_EQUAL: { + *dst = '='; + dst += 1; + break; + } + case TSDB_RELATION_LARGE: { + *dst = '>'; + dst += 1; + break; + } + case TSDB_RELATION_LARGE_EQUAL: { + *dst = '>'; + *(dst + 1) = '='; + dst += 2; + break; + } + case TSDB_RELATION_NOT_EQUAL: { + *dst = '<'; + *(dst + 1) = '>'; + dst += 2; + break; + } + case TSDB_RELATION_OR: { + memcpy(dst, "or", 2); + dst += 2; + break; + } + case TSDB_RELATION_AND: { + memcpy(dst, "and", 3); + dst += 3; + break; + } + default:; + } + return dst; +} + +void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) { + if (pExpr == NULL) { + *dst = 0; + *len = 0; + } + + int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType); + dst += lhs; + *len = lhs; + + char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst); + *len += (start - dst); + + *len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType); +} + +static void UNUSED_FUNC destroySyntaxTree(tSQLSyntaxNode *pNode) { tSQLSyntaxNodeDestroy(pNode); } + +static void tSQLSyntaxNodeDestroy(tSQLSyntaxNode *pNode) { + if (pNode == NULL) return; + + if (pNode->nodeType == TSQL_NODE_EXPR) { + tSQLBinaryExprDestroy(&pNode->pExpr); + } else if (pNode->nodeType == TSQL_NODE_VALUE) { + tVariantDestroy(pNode->pVal); + } + + free(pNode); +} + +void tSQLBinaryExprDestroy(tSQLBinaryExpr **pExprs) { + if (*pExprs == NULL) return; + + tSQLSyntaxNodeDestroy((*pExprs)->pLeft); + tSQLSyntaxNodeDestroy((*pExprs)->pRight); + + free(*pExprs); + *pExprs = NULL; +} + +static int32_t compareIntVal(const void *pLeft, const void *pRight) { + DEFAULT_COMP(GET_INT64_VAL(pLeft), GET_INT64_VAL(pRight)); +} + +static int32_t compareIntDoubleVal(const void *pLeft, const void *pRight) { + DEFAULT_COMP(GET_INT64_VAL(pLeft), GET_DOUBLE_VAL(pRight)); +} + +static int32_t compareDoubleVal(const void *pLeft, const void *pRight) { + DEFAULT_COMP(GET_DOUBLE_VAL(pLeft), GET_DOUBLE_VAL(pRight)); +} + +static int32_t compareDoubleIntVal(const void *pLeft, const void *pRight) { + double ret = (*(double *)pLeft) - (*(int64_t *)pRight); + if (fabs(ret) < DBL_EPSILON) { + return 0; + } else { + return ret > 0 ? 1 : -1; + } +} + +static int32_t compareStrVal(const void *pLeft, const void *pRight) { + int32_t ret = strcmp(pLeft, pRight); + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 1 : -1; + } +} + +static int32_t compareWStrVal(const void *pLeft, const void *pRight) { + int32_t ret = wcscmp(pLeft, pRight); + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 
1 : -1; + } +} + +static int32_t compareStrPatternComp(const void *pLeft, const void *pRight) { + SPatternCompareInfo pInfo = {'%', '_'}; + + const char *pattern = pRight; + const char *str = pLeft; + + if (patternMatch(pattern, str, strlen(str), &pInfo) == TSDB_PATTERN_MATCH) { + return 0; + } else { + return 1; + } +} + +static int32_t compareWStrPatternComp(const void *pLeft, const void *pRight) { + SPatternCompareInfo pInfo = {'%', '_'}; + + const wchar_t *pattern = pRight; + const wchar_t *str = pLeft; + + if (WCSPatternMatch(pattern, str, wcslen(str), &pInfo) == TSDB_PATTERN_MATCH) { + return 0; + } else { + return 1; + } +} + +static __compar_fn_t getFilterComparator(int32_t type, int32_t filterType, int32_t optr) { + __compar_fn_t comparator = NULL; + + switch (type) { + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_BOOL: { + if (filterType >= TSDB_DATA_TYPE_BOOL && filterType <= TSDB_DATA_TYPE_BIGINT) { + comparator = compareIntVal; + } else if (filterType >= TSDB_DATA_TYPE_FLOAT && filterType <= TSDB_DATA_TYPE_DOUBLE) { + comparator = compareIntDoubleVal; + } + break; + } + + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + if (filterType >= TSDB_DATA_TYPE_BOOL && filterType <= TSDB_DATA_TYPE_BIGINT) { + comparator = compareDoubleIntVal; + } else if (filterType >= TSDB_DATA_TYPE_FLOAT && filterType <= TSDB_DATA_TYPE_DOUBLE) { + comparator = compareDoubleVal; + } + break; + } + + case TSDB_DATA_TYPE_BINARY: { + assert(filterType == TSDB_DATA_TYPE_BINARY); + + if (optr == TSDB_RELATION_LIKE) { + /* wildcard query using like operator */ + comparator = compareStrPatternComp; + } else { + /* normal relational comparator */ + comparator = compareStrVal; + } + + break; + } + + case TSDB_DATA_TYPE_NCHAR: { + assert(filterType == TSDB_DATA_TYPE_NCHAR); + + if (optr == TSDB_RELATION_LIKE) { + comparator = compareWStrPatternComp; + } else { + comparator = compareWStrVal; + } + + break; + } + default: + comparator = compareIntVal; + break; + } + + return comparator; +} + +static void setInitialValueForRangeQueryCondition(tSKipListQueryCond *q, int8_t type) { + q->lowerBndRelOptr = TSDB_RELATION_LARGE; + q->upperBndRelOptr = TSDB_RELATION_LESS; + + switch (type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: { + q->upperBnd.nType = TSDB_DATA_TYPE_BIGINT; + q->lowerBnd.nType = TSDB_DATA_TYPE_BIGINT; + + q->upperBnd.i64Key = INT64_MAX; + q->lowerBnd.i64Key = INT64_MIN; + break; + }; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + q->upperBnd.nType = TSDB_DATA_TYPE_DOUBLE; + q->lowerBnd.nType = TSDB_DATA_TYPE_DOUBLE; + q->upperBnd.dKey = DBL_MAX; + q->lowerBnd.dKey = -DBL_MIN; + break; + }; + case TSDB_DATA_TYPE_NCHAR: + case TSDB_DATA_TYPE_BINARY: { + q->upperBnd.nType = type; + q->upperBnd.pz = "\0"; + q->upperBnd.nLen = -1; + + q->lowerBnd.nType = type; + q->lowerBnd.pz = "\0"; + q->lowerBnd.nLen = 0; + } + } +} + +static void tSQLDoFilterInitialResult(tSkipList *pSkipList, bool (*fp)(), tQueryInfo *colInfo, + tQueryResultset *result) { + // primary key, search according to skiplist + if (colInfo->colIdx == 0 && colInfo->optr != TSDB_RELATION_LIKE) { + tSKipListQueryCond q; + setInitialValueForRangeQueryCondition(&q, colInfo->q.nType); + + switch (colInfo->optr) { + case TSDB_RELATION_EQUAL: { + result->num = + tSkipListPointQuery(pSkipList, &colInfo->q, 1, 
INCLUDE_POINT_QUERY, (tSkipListNode ***)&result->pRes); + break; + } + case TSDB_RELATION_NOT_EQUAL: { + result->num = + tSkipListPointQuery(pSkipList, &colInfo->q, 1, EXCLUDE_POINT_QUERY, (tSkipListNode ***)&result->pRes); + break; + } + case TSDB_RELATION_LESS_EQUAL: { + tVariantAssign(&q.upperBnd, &colInfo->q); + q.upperBndRelOptr = colInfo->optr; + result->num = tSkipListQuery(pSkipList, &q, (tSkipListNode ***)&result->pRes); + break; + } + case TSDB_RELATION_LESS: { + tVariantAssign(&q.upperBnd, &colInfo->q); + result->num = tSkipListQuery(pSkipList, &q, (tSkipListNode ***)&result->pRes); + break; + } + case TSDB_RELATION_LARGE: { + tVariantAssign(&q.lowerBnd, &colInfo->q); + result->num = tSkipListQuery(pSkipList, &q, (tSkipListNode ***)&result->pRes); + break; + } + case TSDB_RELATION_LARGE_EQUAL: { + tVariantAssign(&q.lowerBnd, &colInfo->q); + q.lowerBndRelOptr = colInfo->optr; + result->num = tSkipListQuery(pSkipList, &q, (tSkipListNode ***)&result->pRes); + break; + } + default: + pTrace("skiplist:%p, unsupport query operator:%d", pSkipList, colInfo->optr); + } + + tSkipListDestroyKey(&q.upperBnd); + tSkipListDestroyKey(&q.lowerBnd); + } else { + // brutal force search + result->num = tSkipListIterateList(pSkipList, (tSkipListNode ***)&result->pRes, fp, colInfo); + } +} + +void tSQLListTraversePrepare(tQueryInfo *colInfo, SSchema *pSchema, int32_t numOfCols, SSchema *pOneColSchema, + uint8_t optr, tVariant *val) { + int32_t i = 0, offset = 0; + if (strcasecmp(pOneColSchema->name, TSQL_TBNAME_L) == 0) { + i = -1; + offset = -1; + } else { + while (i < numOfCols) { + if (pSchema[i].bytes == pOneColSchema->bytes && pSchema[i].type == pOneColSchema->type && + strcmp(pSchema[i].name, pOneColSchema->name) == 0) { + break; + } else { + offset += pSchema[i++].bytes; + } + } + } + + colInfo->pSchema = pSchema; + colInfo->colIdx = i; + colInfo->optr = optr; + colInfo->offset = offset; + colInfo->comparator = getFilterComparator(pOneColSchema->type, val->nType, optr); + + if (colInfo->pSchema[i].type != val->nType) { + /* convert the query string to be inline with the data type of the queried tags */ + if (colInfo->pSchema[i].type == TSDB_DATA_TYPE_NCHAR && val->nType == TSDB_DATA_TYPE_BINARY) { + colInfo->q.nLen = TSDB_MAX_TAGS_LEN / TSDB_NCHAR_SIZE; + + colInfo->q.wpz = calloc(1, TSDB_MAX_TAGS_LEN); + colInfo->q.nType = TSDB_DATA_TYPE_NCHAR; + + taosMbsToUcs4(val->pz, val->nLen, (char *)colInfo->q.wpz, TSDB_MAX_TAGS_LEN); + colInfo->q.nLen = wcslen(colInfo->q.wpz) + 1; + return; + } else if (colInfo->pSchema[i].type == TSDB_DATA_TYPE_BINARY && val->nType == TSDB_DATA_TYPE_NCHAR) { + colInfo->q.nLen = TSDB_MAX_TAGS_LEN; + colInfo->q.pz = calloc(1, TSDB_MAX_TAGS_LEN); + colInfo->q.nType = TSDB_DATA_TYPE_BINARY; + + taosUcs4ToMbs(val->wpz, val->nLen, colInfo->q.pz); + colInfo->q.nLen = strlen(colInfo->q.pz) + 1; + return; + } + } + + tVariantAssign(&colInfo->q, val); +} + +/* + * qsort comparator + * sort the result to ensure meters with the same gid is grouped together + */ +static int32_t compareByAddr(const void *pLeft, const void *pRight) { + int64_t p1 = (int64_t) * ((tSkipListNode **)pLeft); + int64_t p2 = (int64_t) * ((tSkipListNode **)pRight); + + DEFAULT_COMP(p1, p2); +} + +int32_t merge(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResultset *pFinalRes) { + pFinalRes->pRes = malloc(POINTER_BYTES * (pLeft->num + pRight->num)); + pFinalRes->num = 0; + + // sort according to address + tSkipListNode **pLeftNodes = (tSkipListNode **)pLeft->pRes; + qsort(pLeftNodes, pLeft->num, 
sizeof(pLeft->pRes[0]), compareByAddr); + + tSkipListNode **pRightNodes = (tSkipListNode **)pRight->pRes; + qsort(pRightNodes, pRight->num, sizeof(pRight->pRes[0]), compareByAddr); + + int32_t i = 0, j = 0; + // merge two sorted arrays in O(n) time + while (i < pLeft->num && j < pRight->num) { + int64_t ret = (int64_t)pLeftNodes[i] - (int64_t)pRightNodes[j]; + + if (ret < 0) { + pFinalRes->pRes[pFinalRes->num++] = pLeftNodes[i++]; + } else if (ret > 0) { + pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; + } else { // pNode->key > pkey[i] + pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; + i++; + } + } + + while (i < pLeft->num) { + pFinalRes->pRes[pFinalRes->num++] = pLeftNodes[i++]; + } + + while (j < pRight->num) { + pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j++]; + } + + return pFinalRes->num; +} + +int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResultset *pFinalRes) { + int64_t num = MIN(pLeft->num, pRight->num); + + pFinalRes->pRes = malloc(POINTER_BYTES * num); + pFinalRes->num = 0; + + // sort according to address + tSkipListNode **pLeftNodes = (tSkipListNode **)pLeft->pRes; + qsort(pLeftNodes, pLeft->num, sizeof(pLeft->pRes[0]), compareByAddr); + + tSkipListNode **pRightNodes = (tSkipListNode **)pRight->pRes; + qsort(pRightNodes, pRight->num, sizeof(pRight->pRes[0]), compareByAddr); + + int32_t i = 0, j = 0; + // merge two sorted arrays in O(n) time + while (i < pLeft->num && j < pRight->num) { + int64_t ret = (int64_t)pLeftNodes[i] - (int64_t)pRightNodes[j]; + + if (ret < 0) { + i++; + } else if (ret > 0) { + j++; + } else { // pNode->key > pkey[i] + pFinalRes->pRes[pFinalRes->num++] = pRightNodes[j]; + i++; + j++; + } + } + + return pFinalRes->num; +} + +/* + * + */ +void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, bool (*fp)(tSkipListNode *, void *), tQueryInfo *colInfo, + tQueryResultset *pResult) { + assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE); + + // brutal force search + int32_t num = pResult->num; + for (int32_t i = 0, j = 0; i < pResult->num; ++i) { + if (fp == NULL || (fp != NULL && fp(pResult->pRes[i], colInfo) == true)) { + pResult->pRes[j++] = pResult->pRes[i]; + } else { + num--; + } + } + + pResult->num = num; +} + +// post-root order traverse syntax tree +void tSQLBinaryExprTraverse(tSQLBinaryExpr *pExprs, tSkipList *pSkipList, SSchema *pSchema, int32_t numOfCols, + bool (*fp)(tSkipListNode *, void *), tQueryResultset *result) { + if (pExprs == NULL) return; + + tSQLSyntaxNode *pLeft = pExprs->pLeft; + tSQLSyntaxNode *pRight = pExprs->pRight; + + // recursive traverse left child branch + if (pLeft->nodeType == TSQL_NODE_EXPR || pRight->nodeType == TSQL_NODE_EXPR) { + uint8_t weight = pLeft->pExpr->filterOnPrimaryKey + pRight->pExpr->filterOnPrimaryKey; + + if (weight == 0 && result->num > 0 && pSkipList == NULL) { + /* base on the initial filter result to perform the secondary filter */ + tSQLBinaryExprTraverse(pLeft->pExpr, pSkipList, pSchema, numOfCols, fp, result); + tSQLBinaryExprTraverse(pRight->pExpr, pSkipList, pSchema, numOfCols, fp, result); + } else if (weight == 0 || weight == 2 || (weight == 1 && pExprs->nSQLBinaryOptr == TSDB_RELATION_OR)) { + tQueryResultset rLeft = {0}; + tQueryResultset rRight = {0}; + + tSQLBinaryExprTraverse(pLeft->pExpr, pSkipList, pSchema, numOfCols, fp, &rLeft); + tSQLBinaryExprTraverse(pRight->pExpr, pSkipList, pSchema, numOfCols, fp, &rRight); + + if (pExprs->nSQLBinaryOptr == TSDB_RELATION_AND) { // CROSS + 
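+        // AND combines the two partial result sets by intersection; OR (below)
+        // merges them into a single sorted union.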
intersect(&rLeft, &rRight, result); + } else if (pExprs->nSQLBinaryOptr == TSDB_RELATION_OR) { // or + merge(&rLeft, &rRight, result); + } else { + assert(false); + } + + free(rLeft.pRes); + free(rRight.pRes); + } else { + /* + * first, we filter results based on the skiplist index, which is initial filter stage, + * then, we conduct the secondary filter operation based on the result from the initial filter stage. + */ + if (pExprs->nSQLBinaryOptr == TSDB_RELATION_AND) { + tSQLBinaryExpr *pFirst = (pLeft->pExpr->filterOnPrimaryKey == 1) ? pLeft->pExpr : pRight->pExpr; + tSQLBinaryExpr *pSec = (pLeft->pExpr->filterOnPrimaryKey == 1) ? pRight->pExpr : pLeft->pExpr; + assert(pFirst != pSec && pFirst != NULL && pSec != NULL); + + // we filter the result based on the skiplist index + tSQLBinaryExprTraverse(pFirst, pSkipList, pSchema, numOfCols, fp, result); + + /* + * recursively perform the filter operation based on the initial results, + * So, we do not set the skiplist index as a parameter + */ + tSQLBinaryExprTraverse(pSec, NULL, pSchema, numOfCols, fp, result); + } else { + assert(false); + } + } + + } else { // column project + assert(pLeft->nodeType == TSQL_NODE_COL && pRight->nodeType == TSQL_NODE_VALUE); + tVariant *pCond = pRight->pVal; + SSchema * pTagSchema = pLeft->pSchema; + + tQueryInfo queryColInfo = {0}; + tSQLListTraversePrepare(&queryColInfo, pSchema, numOfCols, pTagSchema, pExprs->nSQLBinaryOptr, pCond); + + if (pSkipList == NULL) { + tSQLListTraverseOnResult(pExprs, fp, &queryColInfo, result); + } else { + assert(result->num == 0); + tSQLDoFilterInitialResult(pSkipList, fp, &queryColInfo, result); + } + + tVariantDestroy(&queryColInfo.q); + } +} + +void tSQLBinaryExprCalcTraverse(tSQLBinaryExpr *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, + char *(*getSourceDataBlock)(void *, char *, int32_t)) { + if (pExprs == NULL) { + return; + } + + tSQLSyntaxNode *pLeft = pExprs->pLeft; + tSQLSyntaxNode *pRight = pExprs->pRight; + + /* the left output has result from the left child syntax tree */ + char *pLeftOutput = malloc(sizeof(int64_t) * numOfRows); + if (pLeft->nodeType == TSQL_NODE_EXPR) { + tSQLBinaryExprCalcTraverse(pLeft->pExpr, numOfRows, pLeftOutput, param, order, getSourceDataBlock); + } + + /* the right output has result from the right child syntax tree */ + char *pRightOutput = malloc(sizeof(int64_t) * numOfRows); + if (pRight->nodeType == TSQL_NODE_EXPR) { + tSQLBinaryExprCalcTraverse(pRight->pExpr, numOfRows, pRightOutput, param, order, getSourceDataBlock); + } + + if (pLeft->nodeType == TSQL_NODE_EXPR) { + if (pRight->nodeType == TSQL_NODE_EXPR) { // exprLeft + exprRight + /* the type of returned value of one expression is always double float + * precious */ + _bi_consumer_fn_t fp = tGetBiConsumerFn(TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_DOUBLE, pExprs->nSQLBinaryOptr); + fp(pLeftOutput, pRightOutput, numOfRows, numOfRows, pOutput, order); + + } else if (pRight->nodeType == TSQL_NODE_COL) { // exprLeft + columnRight + _bi_consumer_fn_t fp = tGetBiConsumerFn(TSDB_DATA_TYPE_DOUBLE, pRight->pSchema->type, pExprs->nSQLBinaryOptr); + // set input buffer + char *pInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->colId); + fp(pLeftOutput, pInputData, numOfRows, numOfRows, pOutput, order); + + } else if (pRight->nodeType == TSQL_NODE_VALUE) { // exprLeft + 12 + _bi_consumer_fn_t fp = tGetBiConsumerFn(TSDB_DATA_TYPE_DOUBLE, pRight->pVal->nType, pExprs->nSQLBinaryOptr); + fp(pLeftOutput, &pRight->pVal->i64Key, numOfRows, 1, pOutput, 
order); + } + } else if (pLeft->nodeType == TSQL_NODE_COL) { + // column data specified on left-hand-side + char *pLeftInputData = getSourceDataBlock(param, pLeft->pSchema->name, pLeft->colId); + if (pRight->nodeType == TSQL_NODE_EXPR) { // columnLeft + expr2 + _bi_consumer_fn_t fp = tGetBiConsumerFn(pLeft->pSchema->type, TSDB_DATA_TYPE_DOUBLE, pExprs->nSQLBinaryOptr); + fp(pLeftInputData, pRightOutput, numOfRows, numOfRows, pOutput, order); + + } else if (pRight->nodeType == TSQL_NODE_COL) { // columnLeft + columnRight + // column data specified on right-hand-side + char *pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->colId); + + _bi_consumer_fn_t fp = tGetBiConsumerFn(pLeft->pSchema->type, pRight->pSchema->type, pExprs->nSQLBinaryOptr); + fp(pLeftInputData, pRightInputData, numOfRows, numOfRows, pOutput, order); + + } else if (pRight->nodeType == TSQL_NODE_VALUE) { // columnLeft + 12 + _bi_consumer_fn_t fp = tGetBiConsumerFn(pLeft->pSchema->type, pRight->pVal->nType, pExprs->nSQLBinaryOptr); + fp(pLeftInputData, &pRight->pVal->i64Key, numOfRows, 1, pOutput, order); + } + } else { + // column data specified on left-hand-side + if (pRight->nodeType == TSQL_NODE_EXPR) { // 12 + expr2 + _bi_consumer_fn_t fp = tGetBiConsumerFn(pLeft->pVal->nType, TSDB_DATA_TYPE_DOUBLE, pExprs->nSQLBinaryOptr); + fp(&pLeft->pVal->i64Key, pRightOutput, 1, numOfRows, pOutput, order); + + } else if (pRight->nodeType == TSQL_NODE_COL) { // 12 + columnRight + // column data specified on right-hand-side + char *pRightInputData = getSourceDataBlock(param, pRight->pSchema->name, pRight->colId); + _bi_consumer_fn_t fp = tGetBiConsumerFn(pLeft->pVal->nType, pRight->pSchema->type, pExprs->nSQLBinaryOptr); + fp(&pLeft->pVal->i64Key, pRightInputData, 1, numOfRows, pOutput, order); + + } else if (pRight->nodeType == TSQL_NODE_VALUE) { // 12 + 12 + _bi_consumer_fn_t fp = tGetBiConsumerFn(pLeft->pVal->nType, pRight->pVal->nType, pExprs->nSQLBinaryOptr); + fp(&pLeft->pVal->i64Key, &pRight->pVal->i64Key, 1, 1, pOutput, order); + } + } + + free(pLeftOutput); + free(pRightOutput); +} + +void tSQLBinaryExprTrv(tSQLBinaryExpr *pExprs, int32_t *val, int16_t *ids) { + if (pExprs == NULL) { + return; + } + + tSQLSyntaxNode *pLeft = pExprs->pLeft; + tSQLSyntaxNode *pRight = pExprs->pRight; + + // recursive traverse left child branch + if (pLeft->nodeType == TSQL_NODE_EXPR) { + tSQLBinaryExprTrv(pLeft->pExpr, val, ids); + } else if (pLeft->nodeType == TSQL_NODE_COL) { + ids[*val] = pLeft->pSchema->colId; + (*val) += 1; + } + + if (pRight->nodeType == TSQL_NODE_EXPR) { + tSQLBinaryExprTrv(pRight->pExpr, val, ids); + } else if (pRight->nodeType == TSQL_NODE_COL) { + ids[*val] = pRight->pSchema->colId; + (*val) += 1; + } +} \ No newline at end of file diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c new file mode 100644 index 000000000000..51c93778baf4 --- /dev/null +++ b/src/client/src/tscAsync.c @@ -0,0 +1,540 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +#include "tlog.h" +#include "trpc.h" +#include "tscProfile.h" +#include "tscSecondaryMerge.h" +#include "tscUtil.h" +#include "tsclient.h" +#include "tsocket.h" +#include "tsql.h" +#include "tutil.h" + +void tscProcessFetchRow(SSchedMsg *pMsg); +void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows); +static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows); +static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows); + +static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)()); + +/* + * proxy function to perform sequentially query&retrieve operation. + * If sql queries upon metric and two-stage merge procedure is not needed, + * it will sequentially query&retrieve data for all vnodes in pCmd->pMetricMeta + */ +static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows); + +void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) { + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) { + tscError("bug!!! pObj:%p", pObj); + globalCode = TSDB_CODE_DISCONNECTED; + tscQueueAsyncError(fp, param); + return; + } + + SSqlObj *pSql = (SSqlObj *)malloc(sizeof(SSqlObj)); + if (pSql == NULL) { + tscError("failed to malloc sqlObj"); + tscQueueAsyncError(fp, param); + return; + } + + memset(pSql, 0, sizeof(SSqlObj)); + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + pSql->signature = pSql; + pSql->pTscObj = pObj; + pSql->fp = fp; + pSql->param = param; + + tscAllocPayloadWithSize(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE); + + int32_t sqlLen = strlen(sqlstr); + if (sqlLen > TSDB_MAX_SQL_LEN) { + tscError("%p sql string too long", pSql); + tscQueueAsyncError(fp, param); + return; + } + + pSql->sqlstr = malloc(sqlLen + 1); + if (pSql->sqlstr == NULL) { + tscError("%p failed to malloc sql string buffer", pSql); + tscQueueAsyncError(fp, param); + return; + } + + pRes->qhandle = 0; + pRes->numOfRows = 1; + + strtolower(sqlstr, pSql->sqlstr); + pSql->sqlstr[sqlLen] = 0; + tscTrace("%p Async SQL: %s, pObj:%p", pSql, pSql->sqlstr, pObj); + + int32_t code = tsParseSql(pSql, pObj->acctId, pObj->db, true); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + + if (code != TSDB_CODE_SUCCESS) { + pSql->res.code = (uint8_t)code; + tscQueueAsyncRes(pSql); + return; + } + + tscDoQuery(pSql); +} + +static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) { + if (tres == NULL) { + return; + } + + SSqlObj *pSql = (SSqlObj *)tres; + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + // sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx + if (numOfRows == 0 && tscProjectionQueryOnMetric(pSql)) { + // vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx + assert(pSql->cmd.vnodeIdx >= 1); + + /* reach the maximum number of output rows, abort */ + if (pSql->cmd.defaultVal[0] > 0 && pRes->numOfTotal >= pCmd->defaultVal[0]) { + (*pSql->fetchFp)(param, tres, 0); + return; + } + + /* update the limit value according to current retrieval results */ + pCmd->limit.limit = pSql->cmd.defaultVal[0] - pRes->numOfTotal; + + if ((++(pSql->cmd.vnodeIdx)) <= pSql->cmd.pMetricMeta->numOfVnodes) { + pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. 
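+      // clear the retrieval state left over from the previous vnode before
+      // issuing the query to the next one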
+ + pRes->row = 0; + pRes->numOfRows = 0; + pCmd->type = 0; + + pSql->fp = tscProcessAsyncRetrieveNextVnode; + tscProcessSql(pSql); + return; + } + } else { + /* localreducer has handle this situation */ + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { + pRes->numOfTotal += pRes->numOfRows; + } + } + + (*pSql->fetchFp)(param, tres, numOfRows); +} + +// actual continue retrieve function with user-specified callback function +static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)()) { + SSqlObj *pSql = (SSqlObj *)tres; + if (pSql == NULL) { // error + tscError("sql object is NULL"); + tscQueueAsyncError(pSql->fetchFp, param); + return; + } + + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + if (pRes->qhandle == 0 || numOfRows != 0) { + if (pRes->qhandle == 0) { + tscError("qhandle is NULL"); + } else { + pRes->code = numOfRows; + } + + tscQueueAsyncError(pSql->fetchFp, param); + return; + } + + pSql->fp = fp; + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC && pCmd->command < TSDB_SQL_LOCAL) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + tscProcessSql(pSql); +} + +/* + * retrieve callback for fetch rows proxy. It serves as the callback function of querying vnode + */ +static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows) { + tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncFetchRowsProxy); +} + +static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows) { + tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncRetrieve); +} + +void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), void *param) { + SSqlObj *pSql = (SSqlObj *)taosa; + if (pSql == NULL || pSql->signature != pSql) { + tscError("sql object is NULL"); + globalCode = TSDB_CODE_DISCONNECTED; + tscQueueAsyncError(fp, param); + return; + } + + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + if (pRes->qhandle == 0) { + tscError("qhandle is NULL"); + tscQueueAsyncError(fp, param); + return; + } + + // user-defined callback function is stored in fetchFp + pSql->fetchFp = fp; + pSql->fp = tscProcessAsyncFetchRowsProxy; + + pSql->param = param; + + pRes->row = 0; + pRes->numOfRows = 0; + pCmd->type = 0; + + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC && pCmd->command < TSDB_SQL_LOCAL) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + tscProcessSql(pSql); +} + +void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW), void *param) { + SSqlObj *pSql = (SSqlObj *)taosa; + if (pSql == NULL || pSql->signature != pSql) { + tscError("sql object is NULL"); + globalCode = TSDB_CODE_DISCONNECTED; + tscQueueAsyncError(fp, param); + return; + } + + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + if (pRes->qhandle == 0) { + tscError("qhandle is NULL"); + tscQueueAsyncError(fp, param); + return; + } + + pSql->fetchFp = fp; + pSql->param = param; + + if (pRes->row >= pRes->numOfRows) { + pRes->row = 0; + pRes->numOfRows = 0; + pCmd->type = 0; + pSql->fp = tscProcessAsyncRetrieve; + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + tscProcessSql(pSql); + } else { + SSchedMsg schedMsg; + schedMsg.fp = tscProcessFetchRow; + schedMsg.ahandle = pSql; + schedMsg.thandle = pRes->tsrow; + schedMsg.msg = NULL; + taosScheduleTask(tscQhandle, &schedMsg); + } +} + +void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) { + SSqlObj *pSql = (SSqlObj *)tres; + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + if (numOfRows == 0) { + // sequentially retrieve data from remain vnodes. + if (tscProjectionQueryOnMetric(pSql)) { + /* + * vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx till all vnode have been retrieved + */ + assert(pSql->cmd.vnodeIdx >= 1); + + /* reach the maximum number of output rows, abort */ + if (pSql->cmd.defaultVal[0] > 0 && pRes->numOfTotal >= pCmd->defaultVal[0]) { + (*pSql->fetchFp)(pSql->param, pSql, NULL); + return; + } + + /* update the limit value according to current retrieval results */ + pCmd->limit.limit = pSql->cmd.defaultVal[0] - pRes->numOfTotal; + + if ((++pSql->cmd.vnodeIdx) <= pSql->cmd.pMetricMeta->numOfVnodes) { + pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. + + pRes->row = 0; + pRes->numOfRows = 0; + pCmd->type = 0; + + pSql->fp = tscProcessAsyncContinueRetrieve; + tscProcessSql(pSql); + return; + } + } else { + (*pSql->fetchFp)(pSql->param, pSql, NULL); + } + } else { + for (int i = 0; i < pCmd->numOfCols; ++i) + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; + pRes->row++; + + (*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow); + } +} + +void tscProcessFetchRow(SSchedMsg *pMsg) { + SSqlObj *pSql = (SSqlObj *)pMsg->ahandle; + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + for (int i = 0; i < pCmd->numOfCols; ++i) + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; + pRes->row++; + + (*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow); +} + +void tscProcessAsyncRes(SSchedMsg *pMsg) { + SSqlObj *pSql = (SSqlObj *)pMsg->ahandle; + STscObj *pTscObj = pSql->pTscObj; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + void *taosres = pSql; + + // pCmd may be released, so cache pCmd->command + int cmd = pCmd->command; + int code = pRes->code ? 
-pRes->code : pRes->numOfRows; + + if ((tscKeepConn[cmd] == 0 || (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS)) && + pSql->pStream == NULL) { + if (pSql->thandle) taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pTscObj->user); + + pSql->thandle = NULL; + } + + // in case of async insert, restore the user specified callback function + bool shouldFree = tscShouldFreeAsyncSqlObj(pSql); + + if (cmd == TSDB_SQL_INSERT) { + assert(pSql->fp != NULL); + pSql->fp = pSql->fetchFp; + } + + (*pSql->fp)(pSql->param, taosres, code); + + if (shouldFree) { + tscFreeSqlObj(pSql); + tscTrace("%p Async sql is automatically freed in async res", pSql); + } +} + +void tscProcessAsyncError(SSchedMsg *pMsg) { + void (*fp)() = pMsg->ahandle; + + (*fp)(pMsg->thandle, NULL, -1); +} + +void tscQueueAsyncError(void(*fp), void *param) { + SSchedMsg schedMsg; + schedMsg.fp = tscProcessAsyncError; + schedMsg.ahandle = fp; + schedMsg.thandle = param; + schedMsg.msg = NULL; + taosScheduleTask(tscQhandle, &schedMsg); +} + +void tscQueueAsyncRes(SSqlObj *pSql) { + if (pSql == NULL || pSql->signature != pSql) { + tscTrace("%p SqlObj is freed, not add into queue async res", pSql); + return; + } else { + tscTrace("%p add into queued async res, code:%d", pSql, pSql->res.code); + } + + SSchedMsg schedMsg; + schedMsg.fp = tscProcessAsyncRes; + schedMsg.ahandle = pSql; + schedMsg.thandle = (void *)1; + schedMsg.msg = NULL; + taosScheduleTask(tscQhandle, &schedMsg); +} + +void tscProcessAsyncFree(SSchedMsg *pMsg) { + SSqlObj *pSql = (SSqlObj *)pMsg->ahandle; + tscTrace("%p sql is freed", pSql); + taos_free_result(pSql); +} + +void tscQueueAsyncFreeResult(SSqlObj *pSql) { + tscTrace("%p sqlObj put in queue to async free", pSql); + + SSchedMsg schedMsg; + schedMsg.fp = tscProcessAsyncFree; + schedMsg.ahandle = pSql; + schedMsg.thandle = (void *)1; + schedMsg.msg = NULL; + taosScheduleTask(tscQhandle, &schedMsg); +} + +void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) { + SSqlObj *pSql = (SSqlObj *)param; + SSqlCmd *pCmd = &pSql->cmd; + int32_t code = TSDB_CODE_SUCCESS; + + assert(!pCmd->isInsertFromFile && pSql->signature == pSql); + + SDataBlockList *pDataBlocks = pCmd->pDataBlocks; + if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + // restore user defined fp + pSql->fp = pSql->fetchFp; + tscTrace("%p Async insertion completed, destroy data block list", pSql); + + // release data block data + tscDestroyBlockArrayList(&pCmd->pDataBlocks); + + // all data has been sent to vnode, call user function + (*pSql->fp)(pSql->param, tres, numOfRows); + } else { + do { + code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pCmd->vnodeIdx++]); + if (code != TSDB_CODE_SUCCESS) { + tscTrace("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%d", + pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize, code); + } + + } while (code != TSDB_CODE_SUCCESS && pCmd->vnodeIdx < pDataBlocks->nSize); + + // build submit msg may fail + if (code == TSDB_CODE_SUCCESS) { + tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize); + tscProcessSql(pSql); + } + } +} + +int tscSendMsgToServer(SSqlObj *pSql); + +void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { + SSqlObj *pSql = (SSqlObj *)param; + if (pSql == NULL || pSql->signature != pSql) return; + + STscObj *pObj = pSql->pTscObj; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + if (pSql->fp == NULL) { + 
tscError("%p callBack is NULL!!!", pSql); + return; + } + + if (pSql->fp == (void *)1) { + pSql->fp = NULL; + + if (code != 0) { + code = abs(code); + pRes->code = code; + tscTrace("%p failed to renew meterMeta", pSql); + sem_post(&pSql->rspSem); + } else { + tscTrace("%p renew meterMeta successfully, command:%d, code:%d, thandle:%p, retry:%d", + pSql, pSql->cmd.command, pSql->res.code, pSql->thandle, pSql->retry); + + assert(pSql->cmd.pMeterMeta == NULL); + tscGetMeterMeta(pSql, pSql->cmd.name); + + code = tscSendMsgToServer(pSql); + if (code != 0) { + pRes->code = code; + sem_post(&pSql->rspSem); + } + } + + return; + } + + if (code != 0) { + pRes->code = (uint8_t)abs(code); + tscQueueAsyncRes(pSql); + return; + } + + if (pSql->pStream == NULL) { + // check if it is a sub-query of metric query first, if true, enter another routine + // todo refactor + if (pSql->fp == tscRetrieveDataRes || pSql->fp == tscRetrieveFromVnodeCallBack) { + assert(pCmd->pMeterMeta->numOfTags != 0 && pCmd->vnodeIdx > 0 && pSql->param != NULL); + + SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; + SSqlObj * pParObj = trs->pParentSqlObj; + assert(pParObj->signature == pParObj && trs->vnodeIdx == pCmd->vnodeIdx && pSql->cmd.pMeterMeta->numOfTags != 0); + tscTrace("%p get metricMeta during metric query successfully", pSql); + + code = tscGetMeterMeta(pSql, pSql->cmd.name); + pRes->code = code; + + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + + code = tscGetMetricMeta(pSql, pSql->cmd.name); + pRes->code = code; + + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + } else { // normal async query continues + code = tsParseSql(pSql, pObj->acctId, pObj->db, false); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + } + } else { // stream computing + code = tscGetMeterMeta(pSql, pSql->cmd.name); + pRes->code = code; + + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + + if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_METRIC(pCmd)) { + code = tscGetMetricMeta(pSql, pSql->cmd.name); + pRes->code = code; + + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + } + } + + if (code != 0) { + tscQueueAsyncRes(pSql); + return; + } + + if (pSql->pStream) { + tscTrace("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command); + /* + * NOTE: + * transfer the sql function for metric query before get meter/metric meta, + * since in callback functions, + * only tscProcessSql(pStream->pSql) is executed! + */ + tscTansformSQLFunctionForMetricQuery(&pSql->cmd); + tscIncStreamExecutionCount(pSql->pStream); + } else { + tscTrace("%p get meterMeta/metricMeta successfully", pSql); + } + + tscDoQuery(pSql); +} diff --git a/src/client/src/tscCache.c b/src/client/src/tscCache.c new file mode 100644 index 000000000000..304e5e0247d6 --- /dev/null +++ b/src/client/src/tscCache.c @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tglobalcfg.h" +#include "tlog.h" +#include "tmempool.h" +#include "tsclient.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +typedef struct _c_hash_t { + uint32_t ip; + short port; + struct _c_hash_t *prev; + struct _c_hash_t *next; + void * data; + uint64_t time; +} SConnHash; + +typedef struct { + SConnHash ** connHashList; + mpool_h connHashMemPool; + int maxSessions; + int total; + int * count; + int64_t keepTimer; + pthread_mutex_t mutex; + void (*cleanFp)(void *); + void *tmrCtrl; + void *pTimer; +} SConnCache; + +int taosHashConn(void *handle, uint32_t ip, short port, char *user) { + SConnCache *pObj = (SConnCache *)handle; + int hash = 0; + // size_t user_len = strlen(user); + + hash = ip >> 16; + hash += (unsigned short)(ip & 0xFFFF); + hash += (unsigned short)port; + while (*user != '\0') { + hash += *user; + user++; + } + + hash = hash % pObj->maxSessions; + + return hash; +} + +void taosRemoveExpiredNodes(SConnCache *pObj, SConnHash *pNode, int hash, uint64_t time) { + if (pNode == NULL) return; + if (time < pObj->keepTimer + pNode->time) return; + + SConnHash *pPrev = pNode->prev, *pNext; + + while (pNode) { + (*pObj->cleanFp)(pNode->data); + pNext = pNode->next; + pObj->total--; + pObj->count[hash]--; + tscTrace("%p ip:0x%x:%d:%d:%p removed, connections in cache:%d", pNode->data, pNode->ip, pNode->port, hash, pNode, + pObj->count[hash]); + taosMemPoolFree(pObj->connHashMemPool, (char *)pNode); + pNode = pNext; + } + + if (pPrev) + pPrev->next = NULL; + else + pObj->connHashList[hash] = NULL; +} + +void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, short port, char *user) { + int hash; + SConnHash * pNode; + SConnCache *pObj; + + uint64_t time = taosGetTimestampMs(); + + pObj = (SConnCache *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + if (data == NULL || ip == 0) { + tscTrace("data:%p ip:0x%x:%d not valid, not added in cache", data, ip, port); + return NULL; + } + + hash = taosHashConn(pObj, ip, port, user); + pNode = (SConnHash *)taosMemPoolMalloc(pObj->connHashMemPool); + pNode->ip = ip; + pNode->port = port; + pNode->data = data; + pNode->prev = NULL; + pNode->time = time; + + pthread_mutex_lock(&pObj->mutex); + + pNode->next = pObj->connHashList[hash]; + if (pObj->connHashList[hash] != NULL) (pObj->connHashList[hash])->prev = pNode; + pObj->connHashList[hash] = pNode; + + pObj->total++; + pObj->count[hash]++; + taosRemoveExpiredNodes(pObj, pNode->next, hash, time); + + pthread_mutex_unlock(&pObj->mutex); + + tscTrace("%p ip:0x%x:%d:%d:%p added, connections in cache:%d", data, ip, port, hash, pNode, pObj->count[hash]); + + return pObj; +} + +void taosCleanConnCache(void *handle, void *tmrId) { + int hash; + SConnHash * pNode; + SConnCache *pObj; + + pObj = (SConnCache *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + if (pObj->pTimer != tmrId) return; + + uint64_t time = taosGetTimestampMs(); + + for (hash = 0; hash < pObj->maxSessions; ++hash) { + pthread_mutex_lock(&pObj->mutex); + pNode = pObj->connHashList[hash]; + taosRemoveExpiredNodes(pObj, pNode, hash, time); + pthread_mutex_unlock(&pObj->mutex); + } + + // tscTrace("timer, total connections in cache:%d", pObj->total); + taosTmrReset(taosCleanConnCache, pObj->keepTimer * 2, pObj, pObj->tmrCtrl, &pObj->pTimer); +} + +void *taosGetConnFromCache(void *handle, uint32_t ip, short port, char *user) { + int hash; + SConnHash * 
pNode; + SConnCache *pObj; + void * pData = NULL; + + pObj = (SConnCache *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + uint64_t time = taosGetTimestampMs(); + + hash = taosHashConn(pObj, ip, port, user); + pthread_mutex_lock(&pObj->mutex); + + pNode = pObj->connHashList[hash]; + while (pNode) { + if (time >= pObj->keepTimer + pNode->time) { + taosRemoveExpiredNodes(pObj, pNode, hash, time); + pNode = NULL; + break; + } + + if (pNode->ip == ip && pNode->port == port) break; + + pNode = pNode->next; + } + + if (pNode) { + taosRemoveExpiredNodes(pObj, pNode->next, hash, time); + + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pObj->connHashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + pData = pNode->data; + taosMemPoolFree(pObj->connHashMemPool, (char *)pNode); + pObj->total--; + pObj->count[hash]--; + } + + pthread_mutex_unlock(&pObj->mutex); + + if (pData) { + tscTrace("%p ip:0x%x:%d:%d:%p retrieved, connections in cache:%d", pData, ip, port, hash, pNode, pObj->count[hash]); + } + + return pData; +} + +void *taosOpenConnCache(int maxSessions, void (*cleanFp)(void *), void *tmrCtrl, int64_t keepTimer) { + SConnHash **connHashList; + mpool_h connHashMemPool; + SConnCache *pObj; + + connHashMemPool = taosMemPoolInit(maxSessions, sizeof(SConnHash)); + if (connHashMemPool == 0) return NULL; + + connHashList = calloc(sizeof(SConnHash *), maxSessions); + if (connHashList == 0) { + taosMemPoolCleanUp(connHashMemPool); + return NULL; + } + + pObj = malloc(sizeof(SConnCache)); + if (pObj == NULL) { + taosMemPoolCleanUp(connHashMemPool); + free(connHashList); + return NULL; + } + memset(pObj, 0, sizeof(SConnCache)); + + pObj->count = calloc(sizeof(int), maxSessions); + pObj->total = 0; + pObj->keepTimer = keepTimer; + pObj->maxSessions = maxSessions; + pObj->connHashMemPool = connHashMemPool; + pObj->connHashList = connHashList; + pObj->cleanFp = cleanFp; + pObj->tmrCtrl = tmrCtrl; + taosTmrReset(taosCleanConnCache, pObj->keepTimer * 2, pObj, pObj->tmrCtrl, &pObj->pTimer); + + pthread_mutex_init(&pObj->mutex, NULL); + + return pObj; +} + +void taosCloseConnCache(void *handle) { + SConnCache *pObj; + + pObj = (SConnCache *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + pthread_mutex_lock(&pObj->mutex); + + taosTmrStopA(&(pObj->pTimer)); + + if (pObj->connHashMemPool) taosMemPoolCleanUp(pObj->connHashMemPool); + + tfree(pObj->connHashList); + tfree(pObj->count) + + pthread_mutex_unlock(&pObj->mutex); + + pthread_mutex_destroy(&pObj->mutex); + + memset(pObj, 0, sizeof(SConnCache)); + free(pObj); +} diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c new file mode 100644 index 000000000000..998f32268870 --- /dev/null +++ b/src/client/src/tscFunctionImpl.c @@ -0,0 +1,4682 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#pragma GCC diagnostic ignored "-Wincompatible-pointer-types" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tast.h" +#include "textbuffer.h" +#include "thistogram.h" +#include "tinterpolation.h" +#include "tlog.h" +#include "tscSyntaxtreefunction.h" +#include "tsqlfunction.h" +#include "ttypes.h" +#include "tutil.h" + +typedef struct tValuePair { + tVariant v; + int64_t timestamp; +} tValuePair; + +typedef struct SSpreadRuntime { + double start; + double end; + char valid; +} SSpreadRuntime; + +void getResultInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, + int16_t *bytes) { + if (!isValidDataType(dataType, dataBytes)) { + pError("Illegal data type %d or data type length %d", dataType, dataBytes); + return; + } + + if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX || functionId == TSDB_FUNC_FIRST || + functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || + functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || + functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || + functionId == TSDB_FUNC_INTERP || functionId == TSDB_FUNC_LAST_ROW) { + *type = (int16_t)dataType; + *bytes = (int16_t)dataBytes; + return; + } + + if (functionId == TSDB_FUNC_COUNT) { + *type = TSDB_DATA_TYPE_BIGINT; + *bytes = sizeof(int64_t); + return; + } + + if (functionId == TSDB_FUNC_AVG || functionId == TSDB_FUNC_PERCT || functionId == TSDB_FUNC_APERCT || + functionId == TSDB_FUNC_STDDEV || functionId == TSDB_FUNC_ARITHM || functionId == TSDB_FUNC_SPREAD || + functionId == TSDB_FUNC_WAVG) { + *type = TSDB_DATA_TYPE_DOUBLE; + *bytes = sizeof(double); + return; + } + + if (functionId == TSDB_FUNC_SUM) { + if (dataType >= TSDB_DATA_TYPE_TINYINT && dataType <= TSDB_DATA_TYPE_BIGINT) { + *type = TSDB_DATA_TYPE_BIGINT; + } else { + *type = TSDB_DATA_TYPE_DOUBLE; + } + + *bytes = sizeof(int64_t); + return; + } + + if (functionId == TSDB_FUNC_LEASTSQR) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string + } else if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = dataBytes + DATA_SET_FLAG_SIZE + TSDB_KEYSIZE; + } else if (functionId == TSDB_FUNC_SPREAD_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SSpreadRuntime); + } else if (functionId == TSDB_FUNC_WAVG_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SWavgRuntime); + } else if (functionId == TSDB_FUNC_MIN_DST || functionId == TSDB_FUNC_MAX_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = dataBytes + DATA_SET_FLAG_SIZE; + } else if (functionId == TSDB_FUNC_SUM_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SSumRuntime); + } else if (functionId == TSDB_FUNC_AVG_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SAvgRuntime); + } else if (functionId == TSDB_FUNC_TOP_DST || functionId == TSDB_FUNC_BOTTOM_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(int64_t) + sizeof(tValuePair) * param; + } else if (functionId == TSDB_FUNC_APERCT_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1) + sizeof(SHistogramInfo); + } else if (functionId == TSDB_FUNC_LAST_ROW_DST) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = dataBytes + DATA_SET_FLAG_SIZE + TSDB_KEYSIZE; + } +} + +/* + * whether has put the first result into the 
output buffer + * decided by if there are timestamp of value and the seperator ',' + */ + +#define IS_DATA_NOT_ASSIGNED(ctx) (*(char *)((ctx)->aOutputBuf + TSDB_KEYSIZE) != DATA_SET_FLAG) +#define SET_DATA_ASSIGNED(ctx) (*(char *)((ctx)->aOutputBuf + TSDB_KEYSIZE) = DATA_SET_FLAG) + +#define SET_VAL(ctx, numOfElem, numOfRes) \ + do { \ + if ((numOfElem) <= 0) { \ + break; \ + } \ + (ctx)->numOfIteratedElems += (numOfElem); \ + (ctx)->numOfOutputElems = (numOfRes); \ + } while (0); + +#define INC_INIT_VAL(ctx, numOfElem, numOfRes) \ + do { \ + (ctx)->numOfIteratedElems += (numOfElem); \ + (ctx)->numOfOutputElems += (numOfRes); \ + } while (0); + +void noop(SQLFunctionCtx *pCtx) { UNUSED(pCtx); /* suppress warning*/ } +bool no_next_step(SQLFunctionCtx *pCtx) { + UNUSED(pCtx); + return false; +} + +#define INIT_VAL(ctx) \ + do { \ + (ctx)->currentStage = 0; \ + (ctx)->numOfOutputElems = 0; \ + (ctx)->numOfIteratedElems = 0; \ + } while (0); + +#define GET_INPUT_CHAR(x) (((char *)((x)->aInputElemBuf)) + ((x)->startOffset) * ((x)->inputBytes)) +#define GET_INPUT_CHAR_INDEX(x, y) (GET_INPUT_CHAR(x) + (y) * (x)->inputBytes) + +#define SET_HAS_DATA_FLAG(x) ((x) = DATA_SET_FLAG) +#define HAS_DATA_FLAG(x) ((x) == DATA_SET_FLAG) + +#define tPow(x) ((x) * (x)) + +void function_setup(SQLFunctionCtx *pCtx) { + memset(pCtx->aOutputBuf, 0, pCtx->outputBytes); + pCtx->intermediateBuf[0].i64Key = 0; + INIT_VAL(pCtx); +} + +/** + * in handling the stable query, function_finalize is called after the secondary + * merge being completed, during the first merge procedure, which is executed at the + * vnode side, the finalize will never be called. + * todo: add more information for debug in SQLFunctionCtx + * @param pCtx + */ +void function_finalize(SQLFunctionCtx *pCtx) { + if (pCtx->numOfIteratedElems == 0) { + pTrace("no result generated, result is set to NULL"); + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + } +} + +static bool count_function(SQLFunctionCtx *pCtx) { + int32_t numOfElem = 0; + + /* + * 1. column data missing (schema modified) causes pCtx->hasNullValue == true. pCtx->preAggVals.isSet == true; + * 2. for general non-primary key columns, pCtx->hasNullValue may be true or false, pCtx->preAggVals.isSet == true; + * 3. for primary key column, pCtx->hasNullValue always be false, pCtx->preAggVals.isSet == false; + */ + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus)) { // Pre-aggregation + if (pCtx->preAggVals.isSet) { + numOfElem = pCtx->size - pCtx->preAggVals.numOfNullPoints; + } else { + assert(pCtx->hasNullValue == false); + numOfElem = pCtx->size; + } + + goto _count_over; + } + + /* + * In following cases, the data block is loaded: + * 1. it is a first/last file block for a query + * 2. actual block data is loaded in case of handling other queries, such as apercentile/wavg/stddev etc. + * 3. 
it is a cache block + */ + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + char *val = GET_INPUT_CHAR_INDEX(pCtx, i); + if (isNull(val, pCtx->inputType)) { + continue; + } + + numOfElem += 1; + } + } else { + numOfElem = pCtx->size; + } + +_count_over: + *((int64_t *)pCtx->aOutputBuf) += numOfElem; + SET_VAL(pCtx, numOfElem, 1); + return true; +} + +static bool count_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + *((int64_t *)pCtx->aOutputBuf) += 1; + return true; +} + +static void count_dist_merge(SQLFunctionCtx *pCtx) { + int64_t *pData = (int64_t *)GET_INPUT_CHAR(pCtx); + for (int32_t i = 0; i < pCtx->size; ++i) { + *((int64_t *)pCtx->aOutputBuf) += pData[i]; + } + + SET_VAL(pCtx, pCtx->size, 1); +} + +/** + * 1. If the column value for filter exists, we need to load the SFields, which serves + * as the pre-filter to decide if the actual data block is required or not. + * 2. If it queries on the non-primary timestamp column, SFields is also required to get the not-null value. + * + * @param colId + * @param filterCols + * @return + */ +int32_t count_load_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, int32_t blockStatus) { + if (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + return BLK_DATA_NO_NEEDED; + } else { + return BLK_DATA_FILEDS_NEEDED; + } +} + +int32_t no_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, int32_t blockStatus) { + return BLK_DATA_NO_NEEDED; +} + +#define LIST_ADD(x, n, p) \ + for (int32_t i = 0; i < (n); ++i) { \ + (x) += (p)[i]; \ + }; + +#define TYPED_LIST_ADD(x, n, s, t) \ + do { \ + t *p = (t *)s; \ + LIST_ADD(x, n, p); \ + } while (0); + +#define LIST_ADD_N(x, n, p, t, numOfElem, tsdbType) \ + { \ + t *d = (t *)(p); \ + for (int32_t i = 0; i < (n); ++i) { \ + if (isNull((char *)&(d)[i], tsdbType)) { \ + continue; \ + }; \ + (x) += (d)[i]; \ + numOfElem++; \ + } \ + }; + +#define LOOPCHECK(v, d, n, sign) \ + for (int32_t i = 0; i < (n); ++i) { \ + (v) = (((v) < (d)[i]) ^ (sign)) ? (d)[i] : (v); \ + } + +#define LOOPCHECK_N(val, list, num, tsdbType, sign, notNullElem) \ + for (int32_t i = 0; i < (num); ++i) { \ + if (isNull((char *)&(list)[i], tsdbType)) { \ + continue; \ + } \ + (val) = (((val) < (list)[i]) ^ (sign)) ? 
(list)[i] : (val); \ + notNullElem += 1; \ + } + +#define TYPED_LOOPCHECK(t, v, d, n, sign) \ + do { \ + t *_d = (t *)d; \ + t *v1 = (t *)v; \ + LOOPCHECK(*v1, _d, n, sign); \ + } while (0) + +#define TYPED_LOOPCHECK_N(type, data, list, num, tsdbType, sign, notNullElems) \ + do { \ + type *_data = (type *)data; \ + type *_list = (type *)list; \ + LOOPCHECK_N(*_data, _list, num, tsdbType, sign, notNullElems); \ + } while (0) + +static bool sum_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus) && pCtx->preAggVals.isSet) { + // it's the whole block to be calculated, so the assert must be correct + assert(pCtx->size >= pCtx->preAggVals.numOfNullPoints); + notNullElems = (pCtx->size - pCtx->preAggVals.numOfNullPoints); + + if (notNullElems > 0) { + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + int64_t *retVal = pCtx->aOutputBuf; + *retVal += pCtx->preAggVals.sum; + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + double *retVal = pCtx->aOutputBuf; + *retVal += *(double *)&(pCtx->preAggVals.sum); + } + } + goto _sum_over; + } + + void *pData = GET_INPUT_CHAR(pCtx); + + if (pCtx->hasNullValue) { + notNullElems = 0; + + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + int64_t *retVal = pCtx->aOutputBuf; + + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int8_t, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int16_t, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int32_t, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int64_t, notNullElems, pCtx->inputType); + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + double *retVal = pCtx->aOutputBuf; + LIST_ADD_N(*retVal, pCtx->size, pData, double, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + double *retVal = pCtx->aOutputBuf; + LIST_ADD_N(*retVal, pCtx->size, pData, float, notNullElems, pCtx->inputType); + } + } else { + notNullElems = pCtx->size; + + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + int64_t *retVal = pCtx->aOutputBuf; + + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int8_t); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int16_t); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int32_t); + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int64_t); + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + double *retVal = pCtx->aOutputBuf; + TYPED_LIST_ADD(*retVal, pCtx->size, pData, double); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + double *retVal = pCtx->aOutputBuf; + TYPED_LIST_ADD(*retVal, pCtx->size, pData, float); + } + } + +_sum_over: + // data in the check operation are all null, not output + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool sum_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) 
{ + return true; + } + + SET_VAL(pCtx, 1, 1); + int64_t *res = pCtx->aOutputBuf; + + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + *res += *(int8_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + *res += *(int16_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + *res += *(int32_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + *res += *(int64_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + double *retVal = pCtx->aOutputBuf; + *retVal += *(double *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + double *retVal = pCtx->aOutputBuf; + *retVal += *(float *)pData; + } + + return true; +} + +static bool sum_dist_intern_function(SQLFunctionCtx *pCtx) { + sum_function(pCtx); + + // keep the result data in output buffer, not in the intermediate buffer + if (pCtx->numOfIteratedElems > 0) { + char *pOutputBuf = pCtx->aOutputBuf; + *(pOutputBuf + sizeof(double)) = DATA_SET_FLAG; + } + + return true; +} + +static bool sum_dist_intern_function_f(SQLFunctionCtx *pCtx, int32_t index) { + sum_function_f(pCtx, index); + + /* keep the result data in output buffer, not in the intermediate buffer */ + if (pCtx->numOfIteratedElems) { + char *pOutputBuf = pCtx->aOutputBuf; + *(pOutputBuf + sizeof(double)) = DATA_SET_FLAG; + } + + return true; +} + +static int32_t do_sum_merge_impl(const SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + int32_t type = (pCtx->outputType != TSDB_DATA_TYPE_BINARY) ? pCtx->outputType : pCtx->inputType; + char * input = GET_INPUT_CHAR(pCtx); + + for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { + SSumRuntime *pInput = (SSumRuntime *)input; + if (pInput->valFlag != DATA_SET_FLAG) { + continue; + } + + notNullElems++; + + switch (type) { + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: { + *(int64_t *)pCtx->aOutputBuf += pInput->iOutput; + break; + }; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + *(double *)pCtx->aOutputBuf += pInput->dOutput; + } + } + } + return notNullElems; +} + +static void sum_dist_merge(SQLFunctionCtx *pCtx) { + int32_t notNullElems = do_sum_merge_impl(pCtx); + + SET_VAL(pCtx, notNullElems, 1); + SSumRuntime *pSumRuntime = (SSumRuntime *)pCtx->aOutputBuf; + + if (notNullElems > 0) { + pCtx->numOfIteratedElems += notNullElems; + pSumRuntime->valFlag = DATA_SET_FLAG; + } +} + +static void sum_dist_second_merge(SQLFunctionCtx *pCtx) { + int32_t notNullElems = do_sum_merge_impl(pCtx); + + SET_VAL(pCtx, notNullElems, 1); + /* NOTE: no flag value exists for secondary merge */ + if (notNullElems > 0) { + pCtx->numOfIteratedElems += notNullElems; + } +} + +static int32_t precal_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, int32_t blockStatus) { + return BLK_DATA_FILEDS_NEEDED; +} + +static int32_t data_req_load_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, int32_t blockStatus) { + return BLK_DATA_ALL_NEEDED; +} + +// todo: if column in current data block are null, opt for this case +static int32_t first_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, int32_t blockStatus) { + if (pCtx->order == TSQL_SO_DESC) { + return BLK_DATA_NO_NEEDED; + } + + /* no result for first query, data block is required */ + if (pCtx->numOfOutputElems <= 0) { + return BLK_DATA_ALL_NEEDED; + } else { + return BLK_DATA_NO_NEEDED; + } +} + +static int32_t last_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, 
TSKEY end, int32_t colId, int32_t blockStatus) { + if (pCtx->order == TSQL_SO_ASC) { + return BLK_DATA_NO_NEEDED; + } + + if (pCtx->numOfOutputElems <= 0) { + return BLK_DATA_ALL_NEEDED; + } else { + return BLK_DATA_NO_NEEDED; + } +} + +static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, + int32_t blockStatus) { + if (pCtx->order == TSQL_SO_DESC) { + return BLK_DATA_NO_NEEDED; + } + + if (IS_DATA_NOT_ASSIGNED(pCtx)) { + return BLK_DATA_ALL_NEEDED; + } else { + // data in current block is not earlier than current result + TSKEY ts = *(TSKEY *)pCtx->aOutputBuf; + return (ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + } +} + +static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, + int32_t blockStatus) { + if (pCtx->order == TSQL_SO_ASC) { + return BLK_DATA_NO_NEEDED; + } + + if (IS_DATA_NOT_ASSIGNED(pCtx)) { + return BLK_DATA_ALL_NEEDED; + } else { + TSKEY ts = *(TSKEY *)pCtx->aOutputBuf; + return (ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + } +} + +/* + * the average value is calculated in finalize routine, since current routine does not know the exact number of points + */ +static void avg_finalizer(SQLFunctionCtx *pCtx) { + // pCtx->numOfIteratedElems is the number of not null elements in current + // query range + if (pCtx->numOfIteratedElems == 0) { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + return; // empty table + } + + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + int64_t *retVal = pCtx->aOutputBuf; + *(double *)pCtx->aOutputBuf = (*retVal) / (double)pCtx->numOfIteratedElems; + } else { + double *retVal = pCtx->aOutputBuf; + *retVal = *retVal / (double)pCtx->numOfIteratedElems; + } + + /* cannot set the numOfIteratedElems again since it is set during previous iteration */ + pCtx->numOfOutputElems = 1; +} + +static void avg_dist_function_setup(SQLFunctionCtx *pCtx) { + pCtx->intermediateBuf[0].nType = TSDB_DATA_TYPE_DOUBLE; + memset(pCtx->aOutputBuf, 0, pCtx->outputBytes); + INIT_VAL(pCtx); +} + +static void avg_dist_merge(SQLFunctionCtx *pCtx) { + SAvgRuntime *pDest = (SAvgRuntime *)pCtx->aOutputBuf; + + char *input = GET_INPUT_CHAR(pCtx); + for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { + SAvgRuntime *pInput = (SAvgRuntime *)input; + if (pInput->valFlag != DATA_SET_FLAG) { // current buffer is null + continue; + } + + pDest->sum += pInput->sum; + pDest->num += pInput->num; + pDest->valFlag = DATA_SET_FLAG; + } + + /* if the data set flag is not set, the result is null */ + pCtx->numOfIteratedElems = pDest->num; +} + +static void avg_dist_second_merge(SQLFunctionCtx *pCtx) { + double *sum = pCtx->aOutputBuf; + char * input = GET_INPUT_CHAR(pCtx); + + for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { + SAvgRuntime *pInput = (SAvgRuntime *)input; + if (pInput->valFlag != DATA_SET_FLAG) { // current input is null + continue; + } + + *sum += pInput->sum; + pCtx->numOfIteratedElems += pInput->num; + } +} + +/* + * there is only tiny difference between avg_dist_intern_function and sum_function, + * the output type + */ +static bool avg_dist_intern_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + double *retVal = pCtx->aOutputBuf; + + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus) && pCtx->preAggVals.isSet) { + // Pre-aggregation + notNullElems = pCtx->size - pCtx->preAggVals.numOfNullPoints; + assert(notNullElems >= 0); + + if (pCtx->inputType 
>= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + *retVal += pCtx->preAggVals.sum; + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + *retVal += *(double *)&(pCtx->preAggVals.sum); + } else { + return false; + } + + goto _sum_over; + } + + void *pData = GET_INPUT_CHAR(pCtx); + + if (pCtx->hasNullValue) { + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int8_t, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int16_t, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int32_t, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + LIST_ADD_N(*retVal, pCtx->size, pData, int64_t, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + LIST_ADD_N(*retVal, pCtx->size, pData, double, notNullElems, pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + LIST_ADD_N(*retVal, pCtx->size, pData, float, notNullElems, pCtx->inputType); + } + } else { + notNullElems = pCtx->size; + + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int8_t); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int16_t); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int32_t); + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, int64_t); + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, double); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + TYPED_LIST_ADD(*retVal, pCtx->size, pData, float); + } + } + +_sum_over: + if (notNullElems > 0) { + SET_VAL(pCtx, notNullElems, 1); + SAvgRuntime *pAvgRuntime = (SAvgRuntime *)pCtx->aOutputBuf; + pAvgRuntime->num += notNullElems; + + // the delimiter of ',' is used to denote current buffer has output or not + pAvgRuntime->valFlag = DATA_SET_FLAG; + } + + return true; +} + +static bool avg_dist_intern_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + SAvgRuntime *pDest = (SAvgRuntime *)pCtx->aOutputBuf; + + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + pDest->sum += *(int8_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + pDest->sum += *(int16_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + pDest->sum += *(int32_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + pDest->sum += *(int64_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + pDest->sum += *(double *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + pDest->sum += *(float *)pData; + } + + // restore sum and count of elements + pDest->num += 1; + pDest->valFlag = DATA_SET_FLAG; + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////// + +static bool minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, int32_t *notNullElems) { + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus) && pCtx->preAggVals.isSet) { // pre-agg + /* data in current data block are qualified to the query */ + *notNullElems 
= pCtx->size - pCtx->preAggVals.numOfNullPoints; + assert(*notNullElems >= 0); + + void *tval = (void *)(isMin ? &pCtx->preAggVals.min : &pCtx->preAggVals.max); + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + int64_t val = *(int64_t *)tval; + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + int8_t *data = (int8_t *)pOutput; + *data = (*data < val) ^ isMin ? val : *data; + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + int16_t *data = (int16_t *)pOutput; + *data = (*data < val) ^ isMin ? val : *data; + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + int32_t *data = (int32_t *)pOutput; + *data = (*data < val) ^ isMin ? val : *data; +#if defined(_DEBUG_VIEW) + pTrace("max value updated according to pre-cal:%d", *data); +#endif + + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + int64_t *data = (int64_t *)pOutput; + *data = (*data < val) ^ isMin ? val : *data; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + double *data = (double *)pOutput; + double val = *(double *)tval; + + *data = (*data < val) ^ isMin ? val : *data; + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + float *data = (float *)pOutput; + double val = *(double *)tval; + + *data = (*data < val) ^ isMin ? val : *data; + } else { + return false; + } + + return true; + } + + void *p = GET_INPUT_CHAR(pCtx); + if (pCtx->hasNullValue) { + *notNullElems = 0; + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + TYPED_LOOPCHECK_N(int8_t, pOutput, p, pCtx->size, pCtx->inputType, isMin, *notNullElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + TYPED_LOOPCHECK_N(int16_t, pOutput, p, pCtx->size, pCtx->inputType, isMin, *notNullElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + int32_t *pData = p; + int32_t *retVal = pOutput; + + for (int32_t i = 0; i < pCtx->size; ++i) { + if (isNull(&pData[i], pCtx->inputType)) { + continue; + } + + *retVal = ((*retVal < pData[i]) ^ isMin) ? pData[i] : *retVal; + *notNullElems += 1; + } +#if defined(_DEBUG_VIEW) + pTrace("max value updated:%d", *retVal); +#endif + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + TYPED_LOOPCHECK_N(int64_t, pOutput, p, pCtx->size, pCtx->inputType, isMin, *notNullElems); + } + + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + TYPED_LOOPCHECK_N(double, pOutput, p, pCtx->size, pCtx->inputType, isMin, *notNullElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + TYPED_LOOPCHECK_N(float, pOutput, p, pCtx->size, pCtx->inputType, isMin, *notNullElems); + } + } else { + *notNullElems = pCtx->size; + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + TYPED_LOOPCHECK(int8_t, pOutput, p, pCtx->size, isMin); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + TYPED_LOOPCHECK(int16_t, pOutput, p, pCtx->size, isMin); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + int32_t *pData = p; + int32_t *retVal = pCtx->aOutputBuf; + + for (int32_t i = 0; i < pCtx->size; ++i) { + *retVal = ((*retVal < pData[i]) ^ isMin) ? 
pData[i] : *retVal; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + TYPED_LOOPCHECK(int64_t, pOutput, p, pCtx->size, isMin); + } + + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + TYPED_LOOPCHECK(double, pOutput, p, pCtx->size, isMin); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + TYPED_LOOPCHECK(float, pOutput, p, pCtx->size, isMin); + } + } + + return true; +} + +static void min_function_setup(SQLFunctionCtx *pCtx) { + void *retVal = pCtx->aOutputBuf; + memset(retVal, 0, pCtx->outputBytes); + + int32_t type = 0; + if (pCtx->inputType == TSDB_DATA_TYPE_BINARY) { + type = pCtx->outputType; + } else { + type = pCtx->inputType; + } + + switch (type) { + case TSDB_DATA_TYPE_INT: + *((int32_t *)retVal) = INT32_MAX; + break; + case TSDB_DATA_TYPE_FLOAT: + *((float *)retVal) = FLT_MAX; + break; + case TSDB_DATA_TYPE_DOUBLE: + *((double *)retVal) = DBL_MAX; + break; + case TSDB_DATA_TYPE_BIGINT: + *((int64_t *)retVal) = INT64_MAX; + break; + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t *)retVal) = INT16_MAX; + break; + case TSDB_DATA_TYPE_TINYINT: + *((int8_t *)retVal) = INT8_MAX; + break; + default: + pError("illegal data type:%d in min/max query", pCtx->inputType); + } + + INIT_VAL(pCtx); +} + +static void max_function_setup(SQLFunctionCtx *pCtx) { + void *retVal = pCtx->aOutputBuf; + memset(retVal, 0, pCtx->outputBytes); + + int32_t type = (pCtx->inputType == TSDB_DATA_TYPE_BINARY) ? pCtx->outputType : pCtx->inputType; + + switch (type) { + case TSDB_DATA_TYPE_INT: + *((int32_t *)retVal) = INT32_MIN; + break; + case TSDB_DATA_TYPE_FLOAT: + *((float *)retVal) = -FLT_MIN; + break; + case TSDB_DATA_TYPE_DOUBLE: + *((double *)retVal) = -DBL_MIN; + break; + case TSDB_DATA_TYPE_BIGINT: + *((int64_t *)retVal) = INT64_MIN; + break; + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t *)retVal) = INT16_MIN; + break; + case TSDB_DATA_TYPE_TINYINT: + *((int8_t *)retVal) = INT8_MIN; + break; + default: + pError("illegal data type:%d in min/max query", pCtx->inputType); + } + + INIT_VAL(pCtx); +} + +static bool min_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + bool ret = minMax_function(pCtx, pCtx->aOutputBuf, 1, ¬NullElems); + + SET_VAL(pCtx, notNullElems, 1); + return ret; +} + +static bool max_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + bool ret = minMax_function(pCtx, pCtx->aOutputBuf, 0, ¬NullElems); + + SET_VAL(pCtx, notNullElems, 1); + return ret; +} + +static bool min_dist_intern_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + minMax_function(pCtx, pCtx->aOutputBuf, 1, ¬NullElems); + + SET_VAL(pCtx, notNullElems, 1); + + if (notNullElems > 0) { + pCtx->aOutputBuf[pCtx->inputBytes] = DATA_SET_FLAG; + } + + return true; +} + +static bool max_dist_intern_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + minMax_function(pCtx, pCtx->aOutputBuf, 0, ¬NullElems); + + SET_VAL(pCtx, notNullElems, 1); + + if (notNullElems > 0) { + pCtx->aOutputBuf[pCtx->inputBytes] = DATA_SET_FLAG; + } + + return true; +} + +static int32_t minmax_dist_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *output, bool isMin) { + int32_t notNullElems = 0; + + int32_t type = (pCtx->inputType != TSDB_DATA_TYPE_BINARY) ? 
pCtx->inputType : pCtx->outputType; + for (int32_t i = 0; i < pCtx->size; ++i) { + char *input = GET_INPUT_CHAR_INDEX(pCtx, i); + if (input[bytes] != DATA_SET_FLAG) { + continue; + } + + notNullElems++; + switch (type) { + case TSDB_DATA_TYPE_TINYINT: { + int8_t v = *(int8_t *)input; + if ((*(int8_t *)output < v) ^ isMin) { + *(int8_t *)output = v; + } + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + int16_t v = *(int16_t *)input; + if ((*(int16_t *)output < v) ^ isMin) { + *(int16_t *)output = v; + } + break; + } + case TSDB_DATA_TYPE_INT: { + int32_t v = *(int32_t *)input; + if ((*(int32_t *)output < v) ^ isMin) { + *(int32_t *)output = v; + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { + float v = *(float *)input; + if ((*(float *)output < v) ^ isMin) { + *(float *)output = v; + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + double v = *(double *)input; + if ((*(double *)output < v) ^ isMin) { + *(double *)output = v; + } + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t v = *(int64_t *)input; + if ((*(int64_t *)output < v) ^ isMin) { + *(int64_t *)output = v; + } + break; + }; + default: + break; + } + } + + return notNullElems; +} + +static void min_dist_merge(SQLFunctionCtx *pCtx) { + int32_t notNullElems = minmax_dist_merge_impl(pCtx, pCtx->inputBytes, pCtx->aOutputBuf, 1); + + SET_VAL(pCtx, notNullElems, 1); + if (notNullElems > 0) { + (pCtx->aOutputBuf)[pCtx->inputBytes] = DATA_SET_FLAG; + pCtx->numOfIteratedElems += notNullElems; + } +} + +static void min_dist_second_merge(SQLFunctionCtx *pCtx) { + char * output = (char *)pCtx->aOutputBuf; + int32_t notNullElems = minmax_dist_merge_impl(pCtx, pCtx->outputBytes, output, 1); + + SET_VAL(pCtx, notNullElems, 1); + if (notNullElems > 0) { + pCtx->numOfIteratedElems += notNullElems; + } +} + +static void max_dist_merge(SQLFunctionCtx *pCtx) { + int32_t notNullElems = minmax_dist_merge_impl(pCtx, pCtx->inputBytes, pCtx->aOutputBuf, 0); + + SET_VAL(pCtx, notNullElems, 1); + if (notNullElems > 0) { + (pCtx->aOutputBuf)[pCtx->inputBytes] = DATA_SET_FLAG; + pCtx->numOfIteratedElems += notNullElems; + } +} + +static void max_dist_second_merge(SQLFunctionCtx *pCtx) { + int32_t notNullElems = minmax_dist_merge_impl(pCtx, pCtx->outputBytes, pCtx->aOutputBuf, 0); + + SET_VAL(pCtx, notNullElems, 1); + if (notNullElems > 0) { + pCtx->numOfIteratedElems += notNullElems; + } +} + +static bool minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + int8_t *output = (int8_t *)pCtx->aOutputBuf; + int8_t i = *(int8_t *)pData; + *output = ((*output < i) ^ isMin) ? i : *output; + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + int16_t *output = pCtx->aOutputBuf; + int16_t i = *(int16_t *)pData; + *output = ((*output < i) ^ isMin) ? i : *output; + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + int32_t *output = pCtx->aOutputBuf; + int32_t i = *(int32_t *)pData; + *output = ((*output < i) ^ isMin) ? i : *output; + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + int64_t *output = pCtx->aOutputBuf; + int64_t i = *(int64_t *)pData; + *output = ((*output < i) ^ isMin) ? i : *output; + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + float *output = pCtx->aOutputBuf; + float i = *(float *)pData; + *output = ((*output < i) ^ isMin) ? 
i : *output; + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + double *output = pCtx->aOutputBuf; + double i = *(double *)pData; + *output = ((*output < i) ^ isMin) ? i : *output; + } + + return true; +} + +static bool min_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + return minMax_function_f(pCtx, index, 1); +} + +static bool max_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + return minMax_function_f(pCtx, index, 0); +} + +static bool min_dist_intern_function_f(SQLFunctionCtx *pCtx, int32_t index) { + min_function_f(pCtx, index); + if (pCtx->numOfIteratedElems) { + (pCtx->aOutputBuf)[pCtx->inputBytes] = DATA_SET_FLAG; + } + + return true; +} + +static bool max_dist_intern_function_f(SQLFunctionCtx *pCtx, int32_t index) { + max_function_f(pCtx, index); + if (pCtx->numOfIteratedElems) { + ((char *)pCtx->aOutputBuf)[pCtx->inputBytes] = DATA_SET_FLAG; + } + + return true; +} + +#define LOOP_STDDEV_IMPL(type, r, d, n, delta, tsdbType) \ + for (int32_t i = 0; i < (n); ++i) { \ + if (isNull((char *)&((type *)d)[i], tsdbType)) { \ + continue; \ + } \ + (r) += tPow(((type *)d)[i] - (delta)); \ + } + +static bool stddev_function(SQLFunctionCtx *pCtx) { + if (pCtx->currentStage == 0) { + /* the first stage to calculate average value */ + return sum_function(pCtx); + } else { + /* the second stage to calculate standard deviation */ + double *retVal = pCtx->aOutputBuf; + double avg = pCtx->intermediateBuf[1].dKey; + void * pData = GET_INPUT_CHAR(pCtx); + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + for (int32_t i = 0; i < pCtx->size; ++i) { + if (isNull(&((int32_t *)pData)[i], pCtx->inputType)) { + continue; + } + *retVal += tPow(((int32_t *)pData)[i] - avg); + } + break; + } + case TSDB_DATA_TYPE_FLOAT: { + LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx->size, avg, pCtx->inputType); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx->size, avg, pCtx->inputType); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx->size, avg, pCtx->inputType); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx->size, avg, pCtx->inputType); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx->size, avg, pCtx->inputType); + break; + } + default: + pError("stddev function not support data type:%d", pCtx->inputType); + } + + return true; + } +} + +static bool stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) { + if (pCtx->currentStage == 0) { + /* the first stage is to calculate average value */ + return sum_function_f(pCtx, index); + } else { + /* the second stage to calculate standard deviation */ + double *retVal = pCtx->aOutputBuf; + double avg = pCtx->intermediateBuf[1].dKey; + + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + *retVal += tPow((*(int32_t *)pData) - avg); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + *retVal += tPow((*(float *)pData) - avg); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + *retVal += tPow((*(double *)pData) - avg); + break; + } + 
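+        // the remaining numeric widths below accumulate the same squared deviation, tPow(value - avg), into *retVal; only the cast type differs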
case TSDB_DATA_TYPE_BIGINT: { + *retVal += tPow((*(int64_t *)pData) - avg); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + *retVal += tPow((*(int16_t *)pData) - avg); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + *retVal += tPow((*(int8_t *)pData) - avg); + break; + } + default: + pError("stddev function not support data type:%d", pCtx->inputType); + } + + return true; + } +} + +static bool stddev_next_step(SQLFunctionCtx *pCtx) { + if (pCtx->currentStage == 0) { + /* + * stddev is calculated in two stage: + * 1. get the average value of all points; + * 2. get final result, based on the average values; + * so, if this routine is in second stage, no further step is required + */ + + ++pCtx->currentStage; + avg_finalizer(pCtx); + + // save average value into tmpBuf, for second stage scan + pCtx->intermediateBuf[1].dKey = ((double *)pCtx->aOutputBuf)[0]; + *((double *)pCtx->aOutputBuf) = 0; + return true; + } + return false; +} + +static void stddev_finalizer(SQLFunctionCtx *pCtx) { + if (pCtx->numOfIteratedElems <= 0) { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + return; + } + + double *retValue = (double *)pCtx->aOutputBuf; + *retValue = sqrt(*retValue / pCtx->numOfIteratedElems); +} + +static bool first_function(SQLFunctionCtx *pCtx) { + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus) || pCtx->order == TSQL_SO_DESC) { + return true; + } + + int32_t notNullElems = 0; + + if (pCtx->hasNullValue) { + // handle the null value + for (int32_t i = 0; i < pCtx->size; ++i) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + if (isNull(data, pCtx->inputType)) { + continue; + } + + memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes); + notNullElems++; + break; + } + } else { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, 0); + memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + + notNullElems = pCtx->size; + } + + SET_VAL(pCtx, notNullElems, 1); + return notNullElems <= 0; +} + +static bool first_function_f(SQLFunctionCtx *pCtx, int32_t index) { + if (pCtx->order == TSQL_SO_DESC) { + return true; + } + + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + + // no need to re-enter current data block + return false; +} + +static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { + char * retVal = pCtx->aOutputBuf; + int64_t *timestamp = pCtx->ptsList; + + if (IS_DATA_NOT_ASSIGNED(pCtx) || timestamp[index] < *(int64_t *)retVal) { + *((int64_t *)retVal) = timestamp[index]; + retVal[TSDB_KEYSIZE] = DATA_SET_FLAG; + memcpy(&retVal[TSDB_KEYSIZE + DATA_SET_FLAG_SIZE], pData, pCtx->inputBytes); + SET_DATA_ASSIGNED(pCtx); + } +} + +/* + * format of intermediate result: "timestamp,value" need to compare the timestamp in the first part (before the comma) to + * decide if the value is earlier than current intermediate result + */ +static bool first_dist_function(SQLFunctionCtx *pCtx) { + if (pCtx->size == 0) { + return true; + } + + /* + * do not to check data in the following cases: + * 1. data block that are not loaded + * 2. 
scan data files in desc order + */ + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus) || pCtx->order == TSQL_SO_DESC) { + return true; + } + + int32_t notNullElems = 0; + + if (pCtx->hasNullValue) { + int32_t i = 0; + // find the first not null value + while (i < pCtx->size) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + if (!isNull(data, pCtx->inputType)) { + break; + } + i++; + } + + if (i < pCtx->size) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, i); + first_data_assign_impl(pCtx, pData, i); + notNullElems++; + } else { + // no data, all data are null + // do nothing + } + } else { + char *pData = GET_INPUT_CHAR(pCtx); + first_data_assign_impl(pCtx, pData, 0); + notNullElems = pCtx->size; + } + + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { + if (pCtx->size == 0) { + return true; + } + + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + if (pCtx->order == TSQL_SO_DESC) { + return true; + } + + first_data_assign_impl(pCtx, pData, 0); + + SET_VAL(pCtx, 1, 1); + return true; +} + +static void first_dist_merge(SQLFunctionCtx *pCtx) { + char *pData = GET_INPUT_CHAR(pCtx); + + if (pData[TSDB_KEYSIZE] != DATA_SET_FLAG) { + return; + } + + if (IS_DATA_NOT_ASSIGNED(pCtx) || *(int64_t *)pData < *(int64_t *)pCtx->aOutputBuf) { + memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes + TSDB_KEYSIZE + DATA_SET_FLAG_SIZE); + SET_DATA_ASSIGNED(pCtx); + } + + pCtx->numOfIteratedElems += 1; +} + +static void first_dist_second_merge(SQLFunctionCtx *pCtx) { + char *pData = GET_INPUT_CHAR(pCtx); + + if (pData[TSDB_KEYSIZE] != DATA_SET_FLAG) { + return; + } + + /* + * NOTE: if secondary merge is not continue executed, the detection of if data assigned or not may be failed. + * Execution on other tables may change the value, since the SQLFunctionCtx is shared by all tables belonged + * to different groups + */ + if (pCtx->intermediateBuf[0].i64Key == 0 || *(int64_t *)pData < pCtx->intermediateBuf[0].i64Key) { + pCtx->intermediateBuf[0].i64Key = *(int64_t *)pData; + memcpy(pCtx->aOutputBuf, pData + TSDB_KEYSIZE + DATA_SET_FLAG_SIZE, pCtx->outputBytes); + } + + SET_VAL(pCtx, 1, 1); +} + +////////////////////////////////////////////////////////////////////////////////////////// +/* + * + * last function problem: + * 1. since the last block may be all null value, so, we simply access the last block is not valid + * each block need to be checked. + * 2. If numOfNullPoints == pBlock->numOfBlocks, the whole block is empty. Otherwise, there is at + * least one data in this block that is not null. + * 3. we access the data block in ascending order, so comparison is not needed. The later accessed + * block must have greater value of timestamp. 
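+ *    e.g. within a loaded block, the scan below walks from index size-1 down to 0 and keeps the first non-NULL value it meets.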
+ */ +static bool last_function(SQLFunctionCtx *pCtx) { + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus) || pCtx->order == TSQL_SO_ASC) { + return true; + } + + int32_t notNullElems = 0; + + if (pCtx->hasNullValue) { + /* get the last not NULL records */ + for (int32_t i = pCtx->size - 1; i >= 0; --i) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + if (isNull(data, pCtx->inputType)) { + continue; + } + + memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes); + notNullElems++; + break; + } + } else { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, pCtx->size - 1); + memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + + notNullElems = pCtx->size; + } + + SET_VAL(pCtx, notNullElems, 1); + return notNullElems <= 0; +} + +static bool last_function_f(SQLFunctionCtx *pCtx, int32_t index) { + if (pCtx->order == TSQL_SO_ASC) { + return true; + } + + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + + // no need to re-enter current data block + return false; +} + +static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { + char * retVal = pCtx->aOutputBuf; + int64_t *timestamp = pCtx->ptsList; + + if (IS_DATA_NOT_ASSIGNED(pCtx) || *(int64_t *)retVal < timestamp[index]) { +#if defined(_DEBUG_VIEW) + pTrace("assign index:%d, ts:%lld, val:%d, ", index, timestamp[index], *(int32_t *)pData); +#endif + *((int64_t *)retVal) = timestamp[index]; + retVal[TSDB_KEYSIZE] = DATA_SET_FLAG; + memcpy(&retVal[TSDB_KEYSIZE + DATA_SET_FLAG_SIZE], pData, pCtx->inputBytes); + SET_DATA_ASSIGNED(pCtx); + } +} + +static bool last_dist_function(SQLFunctionCtx *pCtx) { + if (pCtx->size == 0) { + return true; + } + + /* + * 1. for scan data in asc order, no need to check data + * 2. for data blocks that are not loaded, no need to check data + */ + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus) || pCtx->order == TSQL_SO_ASC) { + return true; + } + + int32_t notNullElems = 0; + if (pCtx->hasNullValue) { + int32_t i = pCtx->size - 1; + while (i >= 0) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + if (!isNull(data, pCtx->inputType)) { + break; + } + + i--; + } + + if (i < 0) { + /* all data in current block are NULL, do nothing */ + } else { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, i); + last_data_assign_impl(pCtx, pData, i); + notNullElems++; + } + } else { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, pCtx->size - 1); + last_data_assign_impl(pCtx, pData, pCtx->size - 1); + notNullElems = pCtx->size; + } + + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { + if (pCtx->size == 0) { + return true; + } + + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + /* + * 1. for scan data in asc order, no need to check data + * 2. 
for data blocks that are not loaded, no need to check data + */ + if (pCtx->order == TSQL_SO_ASC) { + return true; + } + + last_data_assign_impl(pCtx, pData, index); + + SET_VAL(pCtx, 1, 1); + return true; +} + +static void last_dist_merge(SQLFunctionCtx *pCtx) { + char *pData = GET_INPUT_CHAR(pCtx); + char *retVal = pCtx->aOutputBuf; + + /* the input data is null */ + if (pData[TSDB_KEYSIZE] != DATA_SET_FLAG) { + return; + } + + if (IS_DATA_NOT_ASSIGNED(pCtx) || *(int64_t *)pData > *(int64_t *)retVal) { + memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes + DATA_SET_FLAG_SIZE + TSDB_KEYSIZE); + SET_DATA_ASSIGNED(pCtx); + } + + pCtx->numOfIteratedElems += 1; +} + +/* + * in the secondary merge(local reduce), the output is limited by the + * final output size, so the main difference between last_dist_merge and second_merge + * is: the output data format in computing + */ +static void last_dist_second_merge(SQLFunctionCtx *pCtx) { + char *pData = GET_INPUT_CHAR(pCtx); + char *retVal = pCtx->aOutputBuf; + + /* the input data is null */ + if (pData[TSDB_KEYSIZE] != DATA_SET_FLAG) { + return; + } + + // todo refactor, pls refer to first_dist_second_merge for reasons + if (pCtx->intermediateBuf[0].i64Key == 0 || *(int64_t *)pData > pCtx->intermediateBuf[0].i64Key) { + pCtx->intermediateBuf[0].i64Key = *(int64_t *)pData; + memcpy(retVal, pData + TSDB_KEYSIZE + DATA_SET_FLAG_SIZE, pCtx->outputBytes); + } + + SET_VAL(pCtx, 1, 1); +} + +/* + * there must be data in last_row_dist function + */ +static int32_t last_row_dist_function(SQLFunctionCtx *pCtx) { + assert(pCtx->size == 1); + + char *pData = GET_INPUT_CHAR(pCtx); + *(TSKEY *)pCtx->aOutputBuf = pCtx->intermediateBuf[1].i64Key; + + SET_DATA_ASSIGNED(pCtx); + assignVal(pCtx->aOutputBuf + TSDB_KEYSIZE + DATA_SET_FLAG_SIZE, pData, pCtx->inputBytes, pCtx->inputType); + + SET_VAL(pCtx, pCtx->size, 1); + + return true; +} + +////////////////////////////////////////////////////////////////////////////////// + +/* + * intermediate parameters usage: + * 1. param[0]: maximum allowable results + * 2. param[1]: order by type (time or value) + * 3. param[2]: asc/desc order + * 4. param[3]: no use + * + * 1. intermediateBuf[0]: number of existed results + * 2. intermediateBuf[1]: results linklist + * 3. intermediateBuf[2]: no use + * 4. 
intermediateBuf[3]: reserved for tags + * + */ +static void top_bottom_function_setup(SQLFunctionCtx *pCtx) { + /* top-K value */ + pCtx->intermediateBuf[0].nType = TSDB_DATA_TYPE_BIGINT; + pCtx->intermediateBuf[0].i64Key = 0; + + /* + * keep the intermediate results during scan all data blocks + * in the format of: timestamp|value + */ + pCtx->intermediateBuf[1].pz = (tValuePair *)calloc(1, sizeof(tValuePair) * pCtx->param[0].i64Key); + pCtx->intermediateBuf[1].nType = TSDB_DATA_TYPE_BINARY; + + INIT_VAL(pCtx); +} + +#define top_add_elem_impl(list, len, val, ts, seg, mx, type) \ + do { \ + if (len < mx) { \ + if ((len) == 0 || (val) >= list[(len)-1].v.seg) { \ + (list)[len].v.nType = (type); \ + (list)[len].v.seg = (val); \ + (list)[len].timestamp = (ts); \ + } else { \ + int32_t i = (len)-1; \ + while (i >= 0 && (list)[i].v.seg > (val)) { \ + (list)[i + 1] = (list)[i]; \ + i -= 1; \ + } \ + (list)[i + 1].v.nType = (type); \ + (list)[i + 1].v.seg = (val); \ + (list)[i + 1].timestamp = (ts); \ + } \ + len += 1; \ + } else { \ + if ((val) > (list)[0].v.seg) { \ + int32_t i = 0; \ + while (i + 1 < (len) && (list)[i + 1].v.seg < (val)) { \ + (list)[i] = (list)[i + 1]; \ + i += 1; \ + } \ + (list)[i].v.nType = (type); \ + (list)[i].v.seg = (val); \ + (list)[i].timestamp = (ts); \ + } \ + } \ + } while (0); + +#define bottom_add_elem_impl(list, len, val, ts, seg, mx, type) \ + do { \ + if (len < mx) { \ + if ((len) == 0) { \ + (list)[len].v.nType = (type); \ + (list)[len].v.seg = (val); \ + (list)[len].timestamp = (ts); \ + } else { \ + int32_t i = (len)-1; \ + while (i >= 0 && (list)[i].v.seg < (val)) { \ + (list)[i + 1] = (list)[i]; \ + i -= 1; \ + } \ + (list)[i + 1].v.nType = (type); \ + (list)[i + 1].v.seg = (val); \ + (list)[i + 1].timestamp = (ts); \ + } \ + len += 1; \ + } else { \ + if ((val) < (list)[0].v.seg) { \ + int32_t i = 0; \ + while (i + 1 < (len) && (list)[i + 1].v.seg > (val)) { \ + (list)[i] = (list)[i + 1]; \ + i += 1; \ + } \ + (list)[i].v.nType = (type); \ + (list)[i].v.seg = (val); \ + (list)[i].timestamp = (ts); \ + } \ + } \ + } while (0); + +static void top_function_do_add(int32_t *len, int32_t maxLen, tValuePair *pList, void *pData, int64_t *timestamp, + uint16_t dataType) { + switch (dataType) { + case TSDB_DATA_TYPE_INT: { + tValuePair *intList = pList; + int32_t value = *(int32_t *)pData; + if (*len < maxLen) { + if (*len == 0 || value >= intList[*len - 1].v.i64Key) { + intList[*len].v.nType = dataType; + intList[*len].v.i64Key = value; + intList[*len].timestamp = *timestamp; + } else { + int32_t i = (*len) - 1; + while (i >= 0 && intList[i].v.i64Key > value) { + intList[i + 1] = intList[i]; + i -= 1; + } + intList[i + 1].v.nType = dataType; + intList[i + 1].v.i64Key = value; + intList[i + 1].timestamp = *timestamp; + } + (*len)++; + } else { + if (value > pList[0].v.i64Key) { + int32_t i = 0; + while (i + 1 < maxLen && pList[i + 1].v.i64Key < value) { + pList[i] = pList[i + 1]; + i += 1; + } + + pList[i].v.nType = dataType; + pList[i].v.i64Key = value; + pList[i].timestamp = *timestamp; + } + } + } break; + case TSDB_DATA_TYPE_DOUBLE: { + top_add_elem_impl(pList, *len, *(double *)pData, *timestamp, dKey, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + top_add_elem_impl(pList, *len, *(int64_t *)pData, *timestamp, i64Key, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + top_add_elem_impl(pList, *len, *(float *)pData, *timestamp, dKey, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + top_add_elem_impl(pList, 
*len, *(int16_t *)pData, *timestamp, i64Key, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + top_add_elem_impl(pList, *len, *(int8_t *)pData, *timestamp, i64Key, maxLen, dataType); + break; + } + default: + pError("top/bottom function not support data type:%d", dataType); + }; +} + +static void bottom_function_do_add(int32_t *len, int32_t maxLen, tValuePair *pList, void *pData, int64_t *timestamp, + uint16_t dataType) { + switch (dataType) { + case TSDB_DATA_TYPE_INT: { + int32_t value = *(int32_t *)pData; + if ((*len) < maxLen) { + if (*len == 0) { + pList[*len].v.i64Key = value; + pList[*len].timestamp = *timestamp; + } else { + int32_t i = (*len) - 1; + while (i >= 0 && pList[i].v.i64Key < value) { + pList[i + 1] = pList[i]; + i -= 1; + } + pList[i + 1].v.i64Key = value; + pList[i + 1].timestamp = *timestamp; + } + (*len)++; + } else { + if (value < pList[0].v.i64Key) { + int32_t i = 0; + while (i + 1 < maxLen && pList[i + 1].v.i64Key > value) { + pList[i] = pList[i + 1]; + i += 1; + } + pList[i].v.i64Key = value; + pList[i].timestamp = *timestamp; + } + } + } break; + case TSDB_DATA_TYPE_DOUBLE: { + bottom_add_elem_impl(pList, *len, *(double *)pData, *timestamp, dKey, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + bottom_add_elem_impl(pList, *len, *(int64_t *)pData, *timestamp, i64Key, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + bottom_add_elem_impl(pList, *len, *(float *)pData, *timestamp, dKey, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + bottom_add_elem_impl(pList, *len, *(int16_t *)pData, *timestamp, i64Key, maxLen, dataType); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + bottom_add_elem_impl(pList, *len, *(int8_t *)pData, *timestamp, i64Key, maxLen, dataType); + break; + } + }; +} + +static bool top_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + if (isNull(GET_INPUT_CHAR_INDEX(pCtx, i), pCtx->inputType)) { + continue; + } + notNullElems++; + top_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + GET_INPUT_CHAR_INDEX(pCtx, i), &pCtx->ptsList[i], pCtx->inputType); + } + } else { + notNullElems = pCtx->size; + for (int32_t i = 0; i < pCtx->size; ++i) { + top_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + GET_INPUT_CHAR_INDEX(pCtx, i), &pCtx->ptsList[i], pCtx->inputType); + } + } + + SET_VAL(pCtx, notNullElems, pCtx->intermediateBuf[0].i64Key); + return true; +} + +static bool top_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, pCtx->param[0].i64Key); + top_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, pData, + &pCtx->ptsList[index], pCtx->inputType); + + return true; +} + +static bool bottom_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + void * pData = GET_INPUT_CHAR(pCtx); + + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + if (isNull(GET_INPUT_CHAR_INDEX(pCtx, i), pCtx->inputType)) { + continue; + } + + bottom_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + (char *)pData + pCtx->inputBytes * i, &pCtx->ptsList[i], pCtx->inputType); + notNullElems++; + } + } else { + notNullElems = pCtx->size; + for 
(int32_t i = 0; i < pCtx->size; ++i) { + bottom_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + (char *)pData + pCtx->inputBytes * i, pCtx->ptsList + i, pCtx->inputType); + } + } + + SET_VAL(pCtx, notNullElems, pCtx->intermediateBuf[0].i64Key); + return true; +} + +static bool bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, pCtx->param[0].i64Key); + bottom_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, pData, + pCtx->ptsList + index, pCtx->inputType); + + return true; +} + +static int32_t resAscComparFn(const void *pLeft, const void *pRight) { + tValuePair *pLeftElem = (tValuePair *)pLeft; + tValuePair *pRightElem = (tValuePair *)pRight; + + if (pLeftElem->timestamp == pRightElem->timestamp) { + return 0; + } else { + return pLeftElem->timestamp > pRightElem->timestamp ? 1 : -1; + } +} + +static int32_t resDescComparFn(const void *pLeft, const void *pRight) { return -resAscComparFn(pLeft, pRight); } + +static int32_t resDataAscComparFn(const void *pLeft, const void *pRight) { + tValuePair *pLeftElem = (tValuePair *)pLeft; + tValuePair *pRightElem = (tValuePair *)pRight; + + int32_t type = pLeftElem->v.nType; + if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) { + if (pLeftElem->v.dKey == pRightElem->v.dKey) { + return 0; + } else { + return pLeftElem->v.dKey > pRightElem->v.dKey ? 1 : -1; + } + } else { + if (pLeftElem->v.i64Key == pRightElem->v.i64Key) { + return 0; + } else { + return pLeftElem->v.i64Key > pRightElem->v.i64Key ? 1 : -1; + } + } +} + +static int32_t resDataDescComparFn(const void *pLeft, const void *pRight) { return -resDataAscComparFn(pLeft, pRight); } + +static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) { + tValuePair *tvp = (tValuePair *)pCtx->intermediateBuf[1].pz; + + // copy to result set buffer + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + + // in case of second stage merge, always use incremental output. 
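+  // note (an assumption based on the constant name): during the client-side second-stage merge the output buffer
+  // is always filled forward, so the step derived from pCtx->order above is overridden with QUERY_ASC_FORWARD_STEP.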
+  if (pCtx->currentStage == SECONDARY_STAGE_MERGE) {
+    step = QUERY_ASC_FORWARD_STEP;
+  }
+
+  int32_t len = pCtx->numOfOutputElems;
+
+  switch (type) {
+    case TSDB_DATA_TYPE_INT: {
+      int32_t *output = (int32_t *)pCtx->aOutputBuf;
+      for (int32_t i = 0; i < len; ++i, output += step) {
+        *output = tvp[i].v.i64Key;
+      }
+      break;
+    }
+    case TSDB_DATA_TYPE_BIGINT: {
+      int64_t *output = (int64_t *)pCtx->aOutputBuf;
+      for (int32_t i = 0; i < len; ++i, output += step) {
+        *output = tvp[i].v.i64Key;
+      }
+      break;
+    }
+    case TSDB_DATA_TYPE_DOUBLE: {
+      double *output = (double *)pCtx->aOutputBuf;
+      for (int32_t i = 0; i < len; ++i, output += step) {
+        *output = tvp[i].v.dKey;
+      }
+      break;
+    }
+    case TSDB_DATA_TYPE_FLOAT: {
+      float *output = (float *)pCtx->aOutputBuf;
+      for (int32_t i = 0; i < len; ++i, output += step) {
+        *output = tvp[i].v.dKey;
+      }
+      break;
+    }
+    case TSDB_DATA_TYPE_SMALLINT: {
+      int16_t *output = (int16_t *)pCtx->aOutputBuf;
+      for (int32_t i = 0; i < len; ++i, output += step) {
+        *output = tvp[i].v.i64Key;
+      }
+      break;
+    }
+    case TSDB_DATA_TYPE_TINYINT: {
+      int8_t *output = (int8_t *)pCtx->aOutputBuf;
+      for (int32_t i = 0; i < len; ++i, output += step) {
+        *output = tvp[i].v.i64Key;
+      }
+      break;
+    }
+    default: {
+      pError("top/bottom function not support data type:%d", pCtx->inputType);
+      return;
+    }
+  }
+
+  // set the output timestamp of each record.
+  TSKEY *output = pCtx->ptsOutputBuf;
+  for (int32_t i = 0; i < len; ++i, output += step) {
+    *output = tvp[i].timestamp;
+  }
+}
+
+static void top_bottom_function_finalizer(SQLFunctionCtx *pCtx) {
+  /*
+   * fewer qualified results were collected in the temporary list than the required count,
+   * so only the collected results are output
+   */
+  if (pCtx->intermediateBuf[0].i64Key < pCtx->param[0].i64Key) {
+    pCtx->numOfOutputElems = pCtx->intermediateBuf[0].i64Key;
+  }
+
+  tValuePair *tvp = (tValuePair *)pCtx->intermediateBuf[1].pz;
+
+  // the user specifies the output order: sort the results by timestamp or by value accordingly
+  if (pCtx->param[1].i64Key == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+    __compar_fn_t comparator = (pCtx->param[2].i64Key == TSQL_SO_ASC) ? resAscComparFn : resDescComparFn;
+    qsort(tvp, pCtx->numOfOutputElems, sizeof(tValuePair), comparator);
+  } else if (pCtx->param[1].i64Key > PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+    __compar_fn_t comparator = (pCtx->param[2].i64Key == TSQL_SO_ASC) ? resDataAscComparFn : resDataDescComparFn;
+    qsort(tvp, pCtx->numOfOutputElems, sizeof(tValuePair), comparator);
+  }
+
+  int32_t type = pCtx->outputType == TSDB_DATA_TYPE_BINARY ? 
pCtx->inputBytes : pCtx->outputType; + copyTopBotRes(pCtx, type); + + tfree(pCtx->intermediateBuf[1].pz); +} + +typedef struct STopBotRuntime { + int32_t num; + tValuePair res[]; +} STopBotRuntime; + +bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *minval, char *maxval) { + int32_t numOfExistsRes = 0; + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { + numOfExistsRes = (int32_t)pCtx->intermediateBuf[0].i64Key; + } else { + STopBotRuntime *pTpBtRuntime = (STopBotRuntime *)pCtx->aOutputBuf; + numOfExistsRes = pTpBtRuntime->num; + } + + if (numOfExistsRes < pCtx->param[0].i64Key) { + /* required number of results are not reached, continue load data block */ + return true; + } else { // + tValuePair *pRes = pCtx->intermediateBuf[1].pz; + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_TOP_DST) { + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + return *(int8_t *)maxval > pRes[0].v.i64Key; + case TSDB_DATA_TYPE_SMALLINT: + return *(int16_t *)maxval > pRes[0].v.i64Key; + case TSDB_DATA_TYPE_INT: + return *(int32_t *)maxval > pRes[0].v.i64Key; + case TSDB_DATA_TYPE_BIGINT: + return *(int64_t *)maxval > pRes[0].v.i64Key; + case TSDB_DATA_TYPE_FLOAT: + return *(float *)maxval > pRes[0].v.dKey; + case TSDB_DATA_TYPE_DOUBLE: + return *(double *)maxval > pRes[0].v.dKey; + default: + return true; + } + } else { + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + return *(int8_t *)minval < pRes[0].v.i64Key; + case TSDB_DATA_TYPE_SMALLINT: + return *(int16_t *)minval < pRes[0].v.i64Key; + case TSDB_DATA_TYPE_INT: + return *(int32_t *)minval < pRes[0].v.i64Key; + case TSDB_DATA_TYPE_BIGINT: + return *(int64_t *)minval < pRes[0].v.i64Key; + case TSDB_DATA_TYPE_FLOAT: + return *(float *)minval < pRes[0].v.dKey; + case TSDB_DATA_TYPE_DOUBLE: + return *(double *)minval < pRes[0].v.dKey; + default: + return true; + } + } + } +} + +/* + * intermediate parameters usage: + * 1. param[0]: maximum allowable results + * 2. param[1]: order by type (time or value) + * 3. param[2]: asc/desc order + * 4. param[3]: no use + * + * 1. intermediateBuf[0]: number of existed results + * 2. intermediateBuf[1]: results linklist + * 3. intermediateBuf[2]: no use + * 4. 
intermediateBuf[3]: reserved for tags + * + */ +static void top_bottom_dist_function_setup(SQLFunctionCtx *pCtx) { + /* top-K value */ + pCtx->intermediateBuf[0].nType = TSDB_DATA_TYPE_BIGINT; + pCtx->intermediateBuf[0].i64Key = 0; + + if (pCtx->outputType == TSDB_DATA_TYPE_BINARY) { + STopBotRuntime *pRes = (STopBotRuntime *)pCtx->aOutputBuf; + pCtx->intermediateBuf[1].pz = pRes->res; + pRes->num = 0; + + memset(pRes, 0, sizeof(STopBotRuntime) + pCtx->param[0].i64Key * sizeof(tValuePair)); + } else { + /* + * keep the intermediate results during scan all data blocks in the format of: timestamp|value + */ + int32_t size = sizeof(tValuePair) * pCtx->param[0].i64Key; + pCtx->intermediateBuf[1].pz = (tValuePair *)calloc(1, size); + pCtx->intermediateBuf[1].nType = TSDB_DATA_TYPE_BINARY; + pCtx->intermediateBuf[1].nLen = size; + } + + INIT_VAL(pCtx); +} + +static bool top_dist_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + STopBotRuntime *pRes = (STopBotRuntime *)pCtx->aOutputBuf; + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + if (isNull(GET_INPUT_CHAR_INDEX(pCtx, i), pCtx->inputType)) { + continue; + } + notNullElems++; + top_function_do_add(&pRes->num, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, GET_INPUT_CHAR_INDEX(pCtx, i), + &pCtx->ptsList[i], pCtx->inputType); + } + } else { + notNullElems = pCtx->size; + for (int32_t i = 0; i < pCtx->size; ++i) { + top_function_do_add(&pRes->num, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, GET_INPUT_CHAR_INDEX(pCtx, i), + &pCtx->ptsList[i], pCtx->inputType); + } + } + + /* treat the result as only one result */ + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool top_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + STopBotRuntime *pRes = (STopBotRuntime *)pCtx->aOutputBuf; + + SET_VAL(pCtx, 1, 1); + top_function_do_add(&pRes->num, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, pData, &pCtx->ptsList[index], + pCtx->inputType); + + return true; +} + +static void top_dist_merge(SQLFunctionCtx *pCtx) { + char *input = GET_INPUT_CHAR(pCtx); + + STopBotRuntime *pInput = (STopBotRuntime *)input; + if (pInput->num <= 0) { + return; + } + + STopBotRuntime *pRes = (STopBotRuntime *)pCtx->aOutputBuf; + for (int32_t i = 0; i < pInput->num; ++i) { + top_function_do_add(&pRes->num, pCtx->param[0].i64Key, pRes->res, &pInput->res[i].v.i64Key, + &pInput->res[i].timestamp, pCtx->inputType); + } + + pCtx->numOfIteratedElems = pRes->num; +} + +static void top_dist_second_merge(SQLFunctionCtx *pCtx) { + STopBotRuntime *pInput = (STopBotRuntime *)GET_INPUT_CHAR(pCtx); + + /* the intermediate result is binary, we only use the output data type */ + for (int32_t i = 0; i < pInput->num; ++i) { + top_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + &pInput->res[i].v.i64Key, &pInput->res[i].timestamp, pCtx->outputType); + } + + SET_VAL(pCtx, pInput->num, pCtx->intermediateBuf[0].i64Key); +} + +static bool bottom_dist_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + STopBotRuntime *pRes = (STopBotRuntime *)pCtx->aOutputBuf; + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + if (isNull(GET_INPUT_CHAR_INDEX(pCtx, i), pCtx->inputType)) { + continue; + } + notNullElems++; + bottom_function_do_add(&pRes->num, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + 
GET_INPUT_CHAR_INDEX(pCtx, i), &pCtx->ptsList[i], pCtx->inputType); + } + } else { + notNullElems = pCtx->size; + for (int32_t i = 0; i < pCtx->size; ++i) { + bottom_function_do_add(&pRes->num, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + GET_INPUT_CHAR_INDEX(pCtx, i), &pCtx->ptsList[i], pCtx->inputType); + } + } + + /* treat the result as only one result */ + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool bottom_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + STopBotRuntime *pRes = (STopBotRuntime *)pCtx->aOutputBuf; + + SET_VAL(pCtx, 1, 1); + bottom_function_do_add(&pRes->num, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, pData, &pCtx->ptsList[index], + pCtx->inputType); + + return true; +} + +static void bottom_dist_merge(SQLFunctionCtx *pCtx) { + STopBotRuntime *pInput = (STopBotRuntime *)GET_INPUT_CHAR(pCtx); + if (pInput->num <= 0) { + return; + } + + STopBotRuntime *pRes = (STopBotRuntime *)pCtx->aOutputBuf; + for (int32_t i = 0; i < pInput->num; ++i) { + bottom_function_do_add(&pRes->num, pCtx->param[0].i64Key, pRes->res, &pInput->res[i].v.i64Key, + &pInput->res[i].timestamp, pCtx->inputType); + } + + pCtx->numOfIteratedElems = pRes->num; +} + +static void bottom_dist_second_merge(SQLFunctionCtx *pCtx) { + STopBotRuntime *pInput = (STopBotRuntime *)GET_INPUT_CHAR(pCtx); + + /* the intermediate result is binary, we only use the output data type */ + for (int32_t i = 0; i < pInput->num; ++i) { + bottom_function_do_add(&pCtx->intermediateBuf[0].i64Key, pCtx->param[0].i64Key, pCtx->intermediateBuf[1].pz, + &pInput->res[i].v.i64Key, &pInput->res[i].timestamp, pCtx->outputType); + } + + SET_VAL(pCtx, pInput->num, pCtx->intermediateBuf[0].i64Key); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// + +static void percentile_function_setup(SQLFunctionCtx *pCtx) { + pCtx->intermediateBuf[0].nType = TSDB_DATA_TYPE_DOUBLE; + + const int32_t MAX_AVAILABLE_BUFFER_SIZE = 1 << 20; + const int32_t NUMOFCOLS = 1; + + if (pCtx->intermediateBuf[2].pz != NULL) { + assert(pCtx->intermediateBuf[1].pz != NULL); + return; + } + + SSchema field[1] = { + {pCtx->inputType, "dummyCol", 0, pCtx->inputBytes}, + }; + tColModel *pModel = tColModelCreate(field, 1, 1000); + int32_t orderIdx = 0; + + // tOrderDesc object + pCtx->intermediateBuf[2].pz = tOrderDesCreate(&orderIdx, NUMOFCOLS, pModel, TSQL_SO_DESC); + + tMemBucketCreate((tMemBucket **)&(pCtx->intermediateBuf[1].pz), 1024, MAX_AVAILABLE_BUFFER_SIZE, pCtx->inputBytes, + pCtx->inputType, (tOrderDescriptor *)pCtx->intermediateBuf[2].pz); + + INIT_VAL(pCtx); +} + +static bool percentile_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + if (isNull(data, pCtx->inputType)) { + continue; + } + + notNullElems += 1; + tMemBucketPut((tMemBucket *)(pCtx->intermediateBuf[1].pz), data, 1); + } + } else { + for (int32_t i = 0; i < pCtx->size; ++i) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + tMemBucketPut((tMemBucket *)(pCtx->intermediateBuf[1].pz), data, 1); + } + + notNullElems += pCtx->size; + } + + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && 
isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + tMemBucketPut((tMemBucket *)(pCtx->intermediateBuf[1].pz), pData, 1); + return true; +} + +static void percentile_finalizer(SQLFunctionCtx *pCtx) { + double v = pCtx->param[0].nType == TSDB_DATA_TYPE_INT ? pCtx->param[0].i64Key : pCtx->param[0].dKey; + + if (pCtx->numOfIteratedElems > 0) { // check for null + double val = getPercentile(pCtx->intermediateBuf[1].pz, v); + *((double *)pCtx->aOutputBuf) = val; + } else { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + } + + tMemBucketDestroy((tMemBucket **)&(pCtx->intermediateBuf[1].pz)); + tOrderDescDestroy(pCtx->intermediateBuf[2].pz); + + assert(pCtx->intermediateBuf[1].pz == NULL); +} + +static bool apercentile_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + if (isNull(data, pCtx->inputType)) { + continue; + } + + notNullElems += 1; + double v = 0; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = *(int8_t *)data; + break; + case TSDB_DATA_TYPE_SMALLINT: + v = *(int16_t *)data; + break; + case TSDB_DATA_TYPE_BIGINT: + v = *(int64_t *)data; + break; + case TSDB_DATA_TYPE_FLOAT: + v = *(float *)data; + break; + case TSDB_DATA_TYPE_DOUBLE: + v = *(double *)data; + break; + default: + v = *(int32_t *)data; + break; + } + + tHistogramAdd(&pCtx->param[1].pz, v); + } + } else { + for (int32_t i = 0; i < pCtx->size; ++i) { + char * data = GET_INPUT_CHAR_INDEX(pCtx, i); + double v = 0; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = *(int8_t *)data; + break; + case TSDB_DATA_TYPE_SMALLINT: + v = *(int16_t *)data; + break; + case TSDB_DATA_TYPE_BIGINT: + v = *(int64_t *)data; + break; + case TSDB_DATA_TYPE_FLOAT: + v = *(float *)data; + break; + case TSDB_DATA_TYPE_DOUBLE: + v = *(double *)data; + break; + default: + v = *(int32_t *)data; + break; + } + + tHistogramAdd(&pCtx->param[1].pz, v); + } + + notNullElems += pCtx->size; + } + + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + double v = 0; + SET_VAL(pCtx, 1, 1); + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = *(int8_t *)pData; + break; + case TSDB_DATA_TYPE_SMALLINT: + v = *(int16_t *)pData; + break; + case TSDB_DATA_TYPE_BIGINT: + v = *(int64_t *)pData; + break; + case TSDB_DATA_TYPE_FLOAT: + v = *(float *)pData; + break; + case TSDB_DATA_TYPE_DOUBLE: + v = *(double *)pData; + break; + default: + v = *(int32_t *)pData; + break; + } + + tHistogramAdd(&pCtx->param[1].pz, v); + return true; +} + +static void apercentile_finalizer(SQLFunctionCtx *pCtx) { + double v = pCtx->param[0].nType == TSDB_DATA_TYPE_INT ? 
pCtx->param[0].i64Key : pCtx->param[0].dKey; + + if (pCtx->numOfIteratedElems > 0) { // check for null + double ratio[] = {v}; + double *res = tHistogramUniform(pCtx->param[1].pz, ratio, 1); + memcpy(pCtx->aOutputBuf, res, sizeof(double)); + free(res); + } else { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + } + + SET_VAL(pCtx, pCtx->numOfIteratedElems, 1); + tHistogramDestroy(&(pCtx->param[1].pz)); +} + +static void apercentile_dist_function_setup(SQLFunctionCtx *pCtx) { + function_setup(pCtx); + if (pCtx->outputType == TSDB_DATA_TYPE_BINARY) { + tHistogramCreateFrom(pCtx->aOutputBuf, MAX_HISTOGRAM_BIN); + } else { /* for secondary merge at client-side */ + pCtx->param[1].pz = tHistogramCreate(MAX_HISTOGRAM_BIN); + } +} + +static bool apercentile_dist_intern_function(SQLFunctionCtx *pCtx) { + int32_t notNullElems = 0; + + SHistogramInfo *pHisto = (SHistogramInfo *)pCtx->aOutputBuf; + + if (pCtx->hasNullValue) { + for (int32_t i = 0; i < pCtx->size; ++i) { + char *data = GET_INPUT_CHAR_INDEX(pCtx, i); + if (isNull(data, pCtx->inputType)) { + continue; + } + + notNullElems += 1; + double v = 0; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = *(int8_t *)data; + break; + case TSDB_DATA_TYPE_SMALLINT: + v = *(int16_t *)data; + break; + case TSDB_DATA_TYPE_BIGINT: + v = *(int64_t *)data; + break; + case TSDB_DATA_TYPE_FLOAT: + v = *(float *)data; + break; + case TSDB_DATA_TYPE_DOUBLE: + v = *(double *)data; + break; + default: + v = *(int32_t *)data; + break; + } + + tHistogramAdd(&pHisto, v); + } + } else { + for (int32_t i = 0; i < pCtx->size; ++i) { + char * data = GET_INPUT_CHAR_INDEX(pCtx, i); + double v = 0; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = *(int8_t *)data; + break; + case TSDB_DATA_TYPE_SMALLINT: + v = *(int16_t *)data; + break; + case TSDB_DATA_TYPE_BIGINT: + v = *(int64_t *)data; + break; + case TSDB_DATA_TYPE_FLOAT: + v = *(float *)data; + break; + case TSDB_DATA_TYPE_DOUBLE: + v = *(double *)data; + break; + default: + v = *(int32_t *)data; + break; + } + + tHistogramAdd(&pHisto, v); + } + + notNullElems += pCtx->size; + } + + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool apercentile_dist_intern_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SHistogramInfo *pHisto = (SHistogramInfo *)pCtx->aOutputBuf; + SET_VAL(pCtx, 1, 1); + + double v = 0; + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_TINYINT: + v = *(int8_t *)pData; + break; + case TSDB_DATA_TYPE_SMALLINT: + v = *(int16_t *)pData; + break; + case TSDB_DATA_TYPE_BIGINT: + v = *(int64_t *)pData; + break; + case TSDB_DATA_TYPE_FLOAT: + v = *(float *)pData; + break; + case TSDB_DATA_TYPE_DOUBLE: + v = *(double *)pData; + break; + default: + v = *(int32_t *)pData; + break; + } + + tHistogramAdd(&pHisto, v); + return true; +} + +static void apercentile_dist_merge(SQLFunctionCtx *pCtx) { + SHistogramInfo *pInput = (SHistogramInfo *)GET_INPUT_CHAR(pCtx); + if (pInput->numOfElems <= 0) { + return; + } + + size_t size = sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1); + + SHistogramInfo *pHisto = (SHistogramInfo *)pCtx->aOutputBuf; + if (pHisto->numOfElems <= 0) { + char *ptr = pHisto->elems; + memcpy(pHisto, pInput, size); + pHisto->elems = ptr; + } else { + pInput->elems = (char *)pInput + sizeof(SHistogramInfo); + pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + + 
SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput, MAX_HISTOGRAM_BIN); + memcpy(pHisto, pRes, size); + + pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + tHistogramDestroy(&pRes); + } + + pCtx->numOfIteratedElems += 1; +} + +static void apercentile_dist_second_merge(SQLFunctionCtx *pCtx) { + SHistogramInfo *pInput = (SHistogramInfo *)GET_INPUT_CHAR(pCtx); + if (pInput->numOfElems <= 0) { + return; + } + + SHistogramInfo *pHisto = (SHistogramInfo *)pCtx->param[1].pz; + if (pHisto->numOfElems <= 0) { + memcpy(pHisto, pInput, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1)); + pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + } else { + pInput->elems = (char *)pInput + sizeof(SHistogramInfo); + pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo); + + SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput, MAX_HISTOGRAM_BIN); + tHistogramDestroy(&pCtx->param[1].pz); + pCtx->param[1].pz = pRes; + } + + pCtx->numOfIteratedElems += 1; +} + +static void leastsquares_function_setup(SQLFunctionCtx *pCtx) { + if (pCtx->intermediateBuf[1].pz != NULL) { + return; + } + // 2*3 matrix + INIT_VAL(pCtx); + pCtx->intermediateBuf[1].pz = (double(*)[3])calloc(1, sizeof(double) * 2 * 3); + + // set the start x-axle value + pCtx->intermediateBuf[0].dKey = pCtx->param[0].dKey; +} + +#define LEASTSQR_CAL(p, x, y, index, step) \ + do { \ + (p)[0][0] += (double)(x) * (x); \ + (p)[0][1] += (double)(x); \ + (p)[0][2] += (double)(x) * (y)[index]; \ + (p)[1][2] += (y)[index]; \ + (x) += step; \ + } while (0) + +#define LEASTSQR_CAL_LOOP(ctx, param, x, y, tsdbType, n, step) \ + for (int32_t i = 0; i < ctx->size; ++i) { \ + if (isNull((char *)&(y)[i], tsdbType)) { \ + continue; \ + } \ + n++; \ + LEASTSQR_CAL(param, x, y, i, step); \ + } + +static bool leastsquares_function(SQLFunctionCtx *pCtx) { + double(*param)[3] = (double(*)[3])pCtx->intermediateBuf[1].pz; + + double x = pCtx->intermediateBuf[0].dKey; + void * pData = GET_INPUT_CHAR(pCtx); + + int32_t numOfElem = 0; + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + int32_t *p = pData; + // LEASTSQR_CAL_LOOP(pCtx, param, pParamData, p); + for (int32_t i = 0; i < pCtx->size; ++i) { + if (isNull(p, pCtx->inputType)) { + continue; + } + + param[0][0] += x * x; + param[0][1] += x; + param[0][2] += x * p[i]; + param[1][2] += p[i]; + + x += pCtx->param[1].dKey; + numOfElem++; + } + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + int64_t *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + }; + case TSDB_DATA_TYPE_DOUBLE: { + double *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + float *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + int16_t *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + }; + case TSDB_DATA_TYPE_TINYINT: { + int8_t *p = pData; + LEASTSQR_CAL_LOOP(pCtx, param, x, p, pCtx->inputType, numOfElem, pCtx->param[1].dKey); + break; + }; + } + + pCtx->intermediateBuf[0].dKey = x; + + SET_VAL(pCtx, numOfElem, 1); + return true; +} + +static bool leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + double(*param)[3] = 
(double(*)[3])pCtx->intermediateBuf[1].pz; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + int32_t *p = pData; + LEASTSQR_CAL(param, pCtx->intermediateBuf[0].dKey, p, index, pCtx->param[1].dKey); + break; + }; + case TSDB_DATA_TYPE_TINYINT: { + int8_t *p = pData; + LEASTSQR_CAL(param, pCtx->intermediateBuf[0].dKey, p, index, pCtx->param[1].dKey); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + int16_t *p = pData; + LEASTSQR_CAL(param, pCtx->intermediateBuf[0].dKey, p, index, pCtx->param[1].dKey); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t *p = pData; + LEASTSQR_CAL(param, pCtx->intermediateBuf[0].dKey, p, index, pCtx->param[1].dKey); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + float *p = pData; + LEASTSQR_CAL(param, pCtx->intermediateBuf[0].dKey, p, index, pCtx->param[1].dKey); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + double *p = pData; + LEASTSQR_CAL(param, pCtx->intermediateBuf[0].dKey, p, index, pCtx->param[1].dKey); + break; + } + default: + pError("error data type in leastsquares function:%d", pCtx->inputType); + }; + + return true; +} + +static void leastsquare_finalizer(SQLFunctionCtx *pCtx) { + /* no data in query */ + if (pCtx->numOfIteratedElems <= 0) { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + tfree(pCtx->intermediateBuf[1].pz); + + return; + } + + double(*param)[3] = (double(*)[3])pCtx->intermediateBuf[1].pz; + param[1][1] = pCtx->numOfIteratedElems; + param[1][0] = param[0][1]; + + param[0][0] -= param[1][0] * (param[0][1] / param[1][1]); + param[0][2] -= param[1][2] * (param[0][1] / param[1][1]); + param[0][1] = 0; + param[1][2] -= param[0][2] * (param[1][0] / param[0][0]); + param[1][0] = 0; + param[0][2] /= param[0][0]; + + param[1][2] /= param[1][1]; + + sprintf(pCtx->aOutputBuf, "(%lf, %lf)", param[0][2], param[1][2]); + tfree(pCtx->intermediateBuf[1].pz); +} + +static bool date_col_output_function(SQLFunctionCtx *pCtx) { + if (pCtx->scanFlag == SUPPLEMENTARY_SCAN) { + return true; + } + + SET_VAL(pCtx, pCtx->size, 1); + *(int64_t *)(pCtx->aOutputBuf) = pCtx->nStartQueryTimestamp; + return true; +} + +static bool col_project_function(SQLFunctionCtx *pCtx) { + INC_INIT_VAL(pCtx, pCtx->size, pCtx->size); + + char *pDest = 0; + if (pCtx->order == TSQL_SO_ASC) { + pDest = pCtx->aOutputBuf; + } else { + pDest = pCtx->aOutputBuf - (pCtx->size - 1) * pCtx->inputBytes; + } + + char *pData = GET_INPUT_CHAR(pCtx); + memcpy(pDest, pData, (size_t)pCtx->size * pCtx->inputBytes); + + pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + return true; +} + +static bool col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { + INC_INIT_VAL(pCtx, 1, 1); + + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes); + + pCtx->aOutputBuf += pCtx->inputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + return true; +} + +/** + * only used for tag projection query in select clause + * @param pCtx + * @return + */ +static bool tag_project_function(SQLFunctionCtx *pCtx) { + INC_INIT_VAL(pCtx, pCtx->size, pCtx->size); + + assert(pCtx->inputBytes == pCtx->outputBytes); + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + + for (int32_t i = 0; i < pCtx->size; ++i) { + tVariantDump(&pCtx->intermediateBuf[3], pCtx->aOutputBuf, pCtx->intermediateBuf[3].nType); + pCtx->aOutputBuf += pCtx->outputBytes * factor; + } + return true; +} + +static bool tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { + INC_INIT_VAL(pCtx, 1, 1); + 
tVariantDump(&pCtx->intermediateBuf[3], pCtx->aOutputBuf, pCtx->intermediateBuf[3].nType); + pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + return true; +} + +/** + * used in group by clause. when applying group by tags, the tags value is + * assign + * by using tag function. + * NOTE: there is only ONE output for ONE query range + * @param pCtx + * @return + */ +static bool tag_function(SQLFunctionCtx *pCtx) { + SET_VAL(pCtx, 1, 1); + tVariantDump(&pCtx->intermediateBuf[3], pCtx->aOutputBuf, pCtx->intermediateBuf[3].nType); + return true; +} + +static bool tag_function_f(SQLFunctionCtx *pCtx, int32_t index) { + SET_VAL(pCtx, 1, 1); + tVariantDump(&pCtx->intermediateBuf[3], pCtx->aOutputBuf, pCtx->intermediateBuf[3].nType); + return true; +} + +static bool copy_function(SQLFunctionCtx *pCtx) { + SET_VAL(pCtx, pCtx->size, 1); + + char *pData = GET_INPUT_CHAR(pCtx); + assignVal(pCtx->aOutputBuf, pData, pCtx->inputBytes, pCtx->inputType); + return false; +} + +enum { + INITIAL_VALUE_NOT_ASSIGNED = 0, +}; + +static void diff_function_setup(SQLFunctionCtx *pCtx) { + function_setup(pCtx); + pCtx->intermediateBuf[1].nType = INITIAL_VALUE_NOT_ASSIGNED; + // diff function require the value is set to -1 +} + +// TODO difference in date column +static bool diff_function(SQLFunctionCtx *pCtx) { + pCtx->numOfIteratedElems += pCtx->size; + + void *pData = GET_INPUT_CHAR(pCtx); + bool isFirstBlock = (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED); + + int32_t notNullElems = 0; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + + int32_t i = (pCtx->order == TSQL_SO_ASC) ? 0 : pCtx->size - 1; + TSKEY * pTimestamp = pCtx->ptsOutputBuf; + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + int32_t *pDd = (int32_t *)pData; + int32_t *pOutput = (int32_t *)pCtx->aOutputBuf; + + for (; i < pCtx->size && i >= 0; i += step) { + if (isNull(&pDd[i], pCtx->inputType)) { + continue; + } + + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + } else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) { + *pOutput = pDd[i] - pCtx->intermediateBuf[1].i64Key; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } else { + *pOutput = pDd[i] - pDd[i - step]; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } + + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + notNullElems++; + } + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + int64_t *pDd = (int64_t *)pData; + int64_t *pOutput = (int64_t *)pCtx->aOutputBuf; + + for (; i < pCtx->size && i >= 0; i += step) { + if (isNull(&pDd[i], pCtx->inputType)) { + continue; + } + + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + } else if (i == 0) { + *pOutput = pDd[i] - pCtx->intermediateBuf[1].i64Key; + *pTimestamp = pCtx->ptsList[i]; + + pOutput += step; + pTimestamp += step; + } else { + *pOutput = pDd[i] - pDd[i - 1]; + *pTimestamp = pCtx->ptsList[i]; + + pOutput += step; + pTimestamp += step; + } + + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + notNullElems++; + } + break; + }; + case TSDB_DATA_TYPE_DOUBLE: { + double *pDd = 
(double *)pData; + double *pOutput = (double *)pCtx->aOutputBuf; + + for (; i < pCtx->size && i >= 0; i += step) { + if (isNull(&pDd[i], pCtx->inputType)) { + continue; + } + + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet + pCtx->intermediateBuf[1].dKey = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + } else if (i == 0) { + *pOutput = pDd[i] - pCtx->intermediateBuf[1].dKey; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } else { + *pOutput = pDd[i] - pDd[i - 1]; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } + + memcpy(&pCtx->intermediateBuf[1].i64Key, &pDd[i], pCtx->inputBytes); + pCtx->intermediateBuf[1].nType = pCtx->inputType; + notNullElems++; + } + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + float *pDd = (float *)pData; + float *pOutput = (float *)pCtx->aOutputBuf; + + for (; i < pCtx->size && i >= 0; i += step) { + if (isNull(&pDd[i], pCtx->inputType)) { + continue; + } + + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet + pCtx->intermediateBuf[1].dKey = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + } else if (i == 0) { + *pOutput = pDd[i] - pCtx->intermediateBuf[1].dKey; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } else { + *pOutput = pDd[i] - pDd[i - 1]; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } + + // keep the last value, the remain may be all null + pCtx->intermediateBuf[1].dKey = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + notNullElems++; + } + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + int16_t *pDd = (int16_t *)pData; + int16_t *pOutput = (int16_t *)pCtx->aOutputBuf; + + for (; i < pCtx->size && i >= 0; i += step) { + if (isNull(&pDd[i], pCtx->inputType)) { + continue; + } + + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + } else if (i == 0) { + *pOutput = pDd[i] - pCtx->intermediateBuf[1].i64Key; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } else { + *pOutput = pDd[i] - pDd[i - 1]; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } + + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + notNullElems++; + } + break; + }; + case TSDB_DATA_TYPE_TINYINT: { + int8_t *pDd = (int8_t *)pData; + int8_t *pOutput = (int8_t *)pCtx->aOutputBuf; + + for (; i < pCtx->size && i >= 0; i += step) { + if (isNull((char *)&pDd[i], pCtx->inputType)) { + continue; + } + + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + } else if (i == 0) { + *pOutput = pDd[i] - pCtx->intermediateBuf[1].i64Key; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } else { + *pOutput = pDd[i] - pDd[i - 1]; + *pTimestamp = pCtx->ptsList[i]; + pOutput += step; + pTimestamp += step; + } + + pCtx->intermediateBuf[1].i64Key = pDd[i]; + pCtx->intermediateBuf[1].nType = pCtx->inputType; + notNullElems++; + } + break; + }; + default: + pError("error input type"); + } + + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED || + notNullElems <= 0) { // initial value is not set yet + /* + * 1. 
current block and blocks before are full of null + * 2. current block may be null value + */ + assert(pCtx->hasNullValue); + } else { + if (isFirstBlock) { + pCtx->numOfOutputElems = notNullElems - 1; + } else { + pCtx->numOfOutputElems += notNullElems; + } + + int32_t forwardStep = (isFirstBlock) ? notNullElems - 1 : notNullElems; + + pCtx->aOutputBuf = pCtx->aOutputBuf + forwardStep * pCtx->outputBytes * step; + pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + forwardStep * TSDB_KEYSIZE * step; + } + + return true; +} + +#define TYPE_DIFF_IMPL(ctx, d, type) \ + do { \ + if (ctx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { \ + ctx->intermediateBuf[1].nType = ctx->inputType; \ + *(type *)&ctx->intermediateBuf[1].i64Key = *(type *)d; \ + } else { \ + *(type *)ctx->aOutputBuf = *(type *)d - (*(type *)(&ctx->intermediateBuf[1].i64Key)); \ + *(type *)(&ctx->intermediateBuf[1].i64Key) = *(type *)d; \ + *(int64_t *)ctx->ptsOutputBuf = *(int64_t *)(ctx->ptsList + (TSDB_KEYSIZE)*index); \ + } \ + } while (0); + +static bool diff_function_f(SQLFunctionCtx *pCtx, int32_t index) { + char *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + pCtx->numOfIteratedElems += 1; + if (pCtx->intermediateBuf[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is set + pCtx->numOfOutputElems += 1; + } + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + + switch (pCtx->inputType) { + case TSDB_DATA_TYPE_INT: { + if (pCtx->intermediateBuf[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet + pCtx->intermediateBuf[1].nType = pCtx->inputType; + pCtx->intermediateBuf[1].i64Key = *(int32_t *)pData; + } else { + *(int32_t *)pCtx->aOutputBuf = *(int32_t *)pData - pCtx->intermediateBuf[1].i64Key; + pCtx->intermediateBuf[1].i64Key = *(int32_t *)pData; + *(int64_t *)pCtx->ptsOutputBuf = pCtx->ptsList[index]; + } + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + TYPE_DIFF_IMPL(pCtx, pData, int64_t); + break; + }; + case TSDB_DATA_TYPE_DOUBLE: { + TYPE_DIFF_IMPL(pCtx, pData, double); + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + TYPE_DIFF_IMPL(pCtx, pData, float); + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + TYPE_DIFF_IMPL(pCtx, pData, int16_t); + break; + }; + case TSDB_DATA_TYPE_TINYINT: { + TYPE_DIFF_IMPL(pCtx, pData, int8_t); + break; + }; + default: + pError("error input type"); + } + + if (pCtx->numOfOutputElems > 0) { + pCtx->aOutputBuf = pCtx->aOutputBuf + pCtx->outputBytes * step; + pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * step; + } + return true; +} + +char *arithmetic_callback_function(void *param, char *name, int32_t colId) { + SArithmeticSupport *pSupport = (SArithmeticSupport *)param; + + SSqlFunctionExpr *pExpr = pSupport->pExpr; + int32_t colIndexInBuf = -1; + + for (int32_t i = 0; i < pExpr->pBinExprInfo.numOfCols; ++i) { + if (colId == pExpr->pBinExprInfo.pReqColumns[i].colId) { + colIndexInBuf = pExpr->pBinExprInfo.pReqColumns[i].colIdxInBuf; + break; + } + } + + assert(colIndexInBuf >= 0 && colId >= 0); + return pSupport->data[colIndexInBuf] + pSupport->offset * pSupport->elemSize[colIndexInBuf]; +} + +bool arithmetic_function(SQLFunctionCtx *pCtx) { + pCtx->numOfOutputElems += pCtx->size; + SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz; + + tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, pCtx->size, pCtx->aOutputBuf, sas, pCtx->order, + arithmetic_callback_function); + + pCtx->aOutputBuf = pCtx->aOutputBuf + 
pCtx->outputBytes * pCtx->size * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + return true; +} + +bool arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { + INC_INIT_VAL(pCtx, 1, 1); + SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz; + + sas->offset = index; + tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, 1, pCtx->aOutputBuf, sas, pCtx->order, + arithmetic_callback_function); + + pCtx->aOutputBuf = pCtx->aOutputBuf + pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + return true; +} + +#define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ + { \ + type *inputData = (type *)data; \ + for (int32_t i = 0; i < elemCnt; ++i) { \ + if (isNull((char *)&inputData[i], tsdbType)) { \ + continue; \ + } \ + if (inputData[i] < minOutput) { \ + minOutput = inputData[i]; \ + } \ + if (inputData[i] > maxOutput) { \ + maxOutput = inputData[i]; \ + } \ + numOfNotNullElem++; \ + } \ + } + +#define LIST_MINMAX(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType) \ + { \ + type *inputData = (type *)data; \ + for (int32_t i = 0; i < elemCnt; ++i) { \ + if (inputData[i] < minOutput) { \ + minOutput = inputData[i]; \ + } \ + if (inputData[i] > maxOutput) { \ + maxOutput = inputData[i]; \ + } \ + } \ + } + +void spread_function_setup(SQLFunctionCtx *pCtx) { + if ((pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_DOUBLE) || + (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) || pCtx->inputType == TSDB_DATA_TYPE_BINARY) { + pCtx->intermediateBuf[0].dKey = DBL_MAX; + pCtx->intermediateBuf[3].dKey = -DBL_MAX; + } else { + pError("illegal data type:%d in spread function query", pCtx->inputType); + } + + memset(pCtx->aOutputBuf, 0, pCtx->outputBytes); + INIT_VAL(pCtx); +} + +bool spread_function(SQLFunctionCtx *pCtx) { + int32_t numOfElems = pCtx->size; + + /* column missing cause the hasNullValue to be true */ + if (!IS_DATA_BLOCK_LOADED(pCtx->blockStatus)) { // Pre-aggregation + if (pCtx->preAggVals.isSet) { + numOfElems = pCtx->size - pCtx->preAggVals.numOfNullPoints; + if (numOfElems == 0) { + /* all data are null in current data block, ignore current data block */ + goto _spread_over; + } + + if ((pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) || + (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP)) { + if (pCtx->intermediateBuf[0].dKey > pCtx->preAggVals.min) { + pCtx->intermediateBuf[0].dKey = pCtx->preAggVals.min; + } + + if (pCtx->intermediateBuf[3].dKey < pCtx->preAggVals.max) { + pCtx->intermediateBuf[3].dKey = pCtx->preAggVals.max; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + if (pCtx->intermediateBuf[0].dKey > *(double *)&(pCtx->preAggVals.min)) { + pCtx->intermediateBuf[0].dKey = *(double *)&(pCtx->preAggVals.min); + } + + if (pCtx->intermediateBuf[3].dKey < *(double *)&(pCtx->preAggVals.max)) { + pCtx->intermediateBuf[3].dKey = *(double *)&(pCtx->preAggVals.max); + } + } + } else { + // do nothing + } + + goto _spread_over; + } + + void *pData = GET_INPUT_CHAR(pCtx); + + if (pCtx->hasNullValue) { + numOfElems = 0; + + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + LIST_MINMAX_N(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int8_t, + pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + LIST_MINMAX_N(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int16_t, + 
pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + LIST_MINMAX_N(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int32_t, + pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT || pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { + LIST_MINMAX_N(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int64_t, + pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + LIST_MINMAX_N(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, double, + pCtx->inputType, numOfElems); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + LIST_MINMAX_N(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, float, + pCtx->inputType, numOfElems); + } + } else { + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + LIST_MINMAX(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int8_t, + pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + LIST_MINMAX(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int16_t, + pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + LIST_MINMAX(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int32_t, + pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT || pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { + LIST_MINMAX(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, int64_t, + pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + LIST_MINMAX(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, double, + pCtx->inputType); + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + LIST_MINMAX(pCtx, pCtx->intermediateBuf[0].dKey, pCtx->intermediateBuf[3].dKey, pCtx->size, pData, float, + pCtx->inputType); + } + } + +_spread_over: + + SET_VAL(pCtx, numOfElems, 1); + return true; +} + +bool spread_function_f(SQLFunctionCtx *pCtx, int32_t index) { + void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); + if (pCtx->hasNullValue && isNull(pData, pCtx->inputType)) { + return true; + } + + SET_VAL(pCtx, 1, 1); + + double val = 0.0; + if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + val = *(int8_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + val = *(int16_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + val = *(int32_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + val = *(int64_t *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + val = *(double *)pData; + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + val = *(float *)pData; + } + + if (pCtx->intermediateBuf[0].dKey > val) { + pCtx->intermediateBuf[0].dKey = val; + } + + if (pCtx->intermediateBuf[3].dKey < val) { + pCtx->intermediateBuf[3].dKey = val; + } + + return true; +} + +void spread_function_finalize(SQLFunctionCtx *pCtx) { + /* + * here we do not check the input data types, because in case of metric query, + * the type of intermediate data is binary + */ + if (pCtx->numOfIteratedElems <= 0) { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + return; + } + + *(double *)pCtx->aOutputBuf = pCtx->intermediateBuf[3].dKey - pCtx->intermediateBuf[0].dKey; + SET_VAL(pCtx, pCtx->numOfIteratedElems, 1); +} + +void 
spread_dist_function_setup(SQLFunctionCtx *pCtx) { + spread_function_setup(pCtx); + + /* + * this is the server-side setup function in client-side, the secondary merge do not need this procedure + */ + if (pCtx->outputType == TSDB_DATA_TYPE_BINARY) { + double *pResData = pCtx->aOutputBuf; + pResData[0] = DBL_MAX; // init min value + pResData[1] = -DBL_MAX; // max value + } +} + +bool spread_dist_intern_function(SQLFunctionCtx *pCtx) { + // restore value for calculation, since the intermediate result kept in output buffer + SSpreadRuntime *pOutput = (SSpreadRuntime *)pCtx->aOutputBuf; + pCtx->intermediateBuf[0].dKey = pOutput->start; + pCtx->intermediateBuf[3].dKey = pOutput->end; + + spread_function(pCtx); + + if (pCtx->numOfIteratedElems) { + pOutput->start = pCtx->intermediateBuf[0].dKey; + pOutput->end = pCtx->intermediateBuf[3].dKey; + pOutput->valid = DATA_SET_FLAG; + } + + return true; +} + +bool spread_dist_intern_function_f(SQLFunctionCtx *pCtx, int32_t index) { + // restore value for calculation, since the intermediate result kept + // in output buffer + SSpreadRuntime *pOutput = (SSpreadRuntime *)pCtx->aOutputBuf; + pCtx->intermediateBuf[0].dKey = pOutput->start; + pCtx->intermediateBuf[3].dKey = pOutput->end; + + spread_function_f(pCtx, index); + + /* keep the result data in output buffer, not in the intermediate buffer */ + if (pCtx->numOfIteratedElems) { + pOutput->start = pCtx->intermediateBuf[0].dKey; + pOutput->end = pCtx->intermediateBuf[3].dKey; + pOutput->valid = DATA_SET_FLAG; + } + return true; +} + +void spread_dist_merge(SQLFunctionCtx *pCtx) { + /* + * min,max in double format + * pResData[0] = min + * pResData[1] = max + */ + SSpreadRuntime *pResData = (SSpreadRuntime *)pCtx->aOutputBuf; + + int32_t notNullElems = 0; + for (int32_t i = 0; i < pCtx->size; ++i) { + SSpreadRuntime *input = (SSpreadRuntime *)GET_INPUT_CHAR_INDEX(pCtx, i); + + /* no assign tag, the value is null */ + if (input->valid != DATA_SET_FLAG) { + continue; + } + + if (pResData->start > input->start) { + pResData->start = input->start; + } + + if (pResData->end < input->end) { + pResData->end = input->end; + } + + pResData->valid = DATA_SET_FLAG; + notNullElems++; + } + + pCtx->numOfIteratedElems += notNullElems; +} + +/* + * here we set the result value back to the intermediate buffer, to apply the finalize the function + * the final result is generated in spread_function_finalize + */ +void spread_dist_second_merge(SQLFunctionCtx *pCtx) { + SSpreadRuntime *pData = (SSpreadRuntime *)GET_INPUT_CHAR(pCtx); + + if (pData->valid != DATA_SET_FLAG) { + return; + } + + if (pCtx->intermediateBuf[0].dKey > pData->start) { + pCtx->intermediateBuf[0].dKey = pData->start; + } + + if (pCtx->intermediateBuf[3].dKey < pData->end) { + pCtx->intermediateBuf[3].dKey = pData->end; + } + + pCtx->numOfIteratedElems += 1; +} + +/* + * Compare two strings + * TSDB_MATCH: Match + * TSDB_NOMATCH: No match + * TSDB_NOWILDCARDMATCH: No match in spite of having * or % wildcards. 
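+ * (assumption: pInfo->matchAll and pInfo->matchOne hold the LIKE wildcards, normally '%' and '_')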
+ * Like matching rules: + * '%': Matches zero or more characters + * '_': Matches one character + * + */ +int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) { + char c, c1; + + int32_t i = 0; + int32_t j = 0; + + while ((c = patterStr[i++]) != 0) { + if (c == pInfo->matchAll) { /* Match "*" */ + + while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) { + if (c == pInfo->matchOne && (j > size || str[j++] == 0)) { + // empty string, return not match + return TSDB_PATTERN_NOWILDCARDMATCH; + } + } + + if (c == 0) { + return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */ + } + + char next[3] = {toupper(c), tolower(c), 0}; + while (1) { + size_t n = strcspn(str, next); + str += n; + + if (str[0] == 0 || (n >= size - 1)) { + break; + } + + int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo); + if (ret != TSDB_PATTERN_NOMATCH) { + return ret; + } + } + return TSDB_PATTERN_NOWILDCARDMATCH; + } + + c1 = str[j++]; + + if (j <= size) { + if (c == c1) { + continue; + } + + if (tolower(c) == tolower(c1)) { + continue; + } + + if (c == pInfo->matchOne && c1 != 0) { + continue; + } + } + + return TSDB_PATTERN_NOMATCH; + } + + return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; +} + +int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, const SPatternCompareInfo *pInfo) { + wchar_t c, c1; + wchar_t matchOne = L'_'; // "_" + wchar_t matchAll = L'%'; // "%" + + int32_t i = 0; + int32_t j = 0; + + while ((c = patterStr[i++]) != 0) { + if (c == matchAll) { /* Match "%" */ + + while ((c = patterStr[i++]) == matchAll || c == matchOne) { + if (c == matchOne && (j > size || str[j++] == 0)) { + return TSDB_PATTERN_NOWILDCARDMATCH; + } + } + if (c == 0) { + return TSDB_PATTERN_MATCH; + } + + wchar_t accept[3] = {towupper(c), towlower(c), 0}; + while (1) { + size_t n = wcsspn(str, accept); + + str += n; + if (str[0] == 0 || (n >= size - 1)) { + break; + } + + str++; + + int32_t ret = WCSPatternMatch(&patterStr[i], str, wcslen(str), pInfo); + if (ret != TSDB_PATTERN_NOMATCH) { + return ret; + } + } + + return TSDB_PATTERN_NOWILDCARDMATCH; + } + + c1 = str[j++]; + + if (j <= size) { + if (c == c1) { + continue; + } + + if (towlower(c) == towlower(c1)) { + continue; + } + if (c == matchOne && c1 != 0) { + continue; + } + } + + return TSDB_PATTERN_NOMATCH; + } + + return str[j] == 0 ? 
TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; +} + +static void getStatics_i8(int64_t *primaryKey, int32_t type, int8_t *data, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int64_t *wsum, int32_t *numOfNull) { + *min = INT64_MAX; + *max = INT64_MIN; + *wsum = 0; + + int64_t lastKey = 0; + int8_t lastVal = TSDB_DATA_TINYINT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull((char *)&data[i], type)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + } + + if (*max < data[i]) { + *max = data[i]; + } + + if (type != TSDB_DATA_TYPE_BOOL) { + // ignore the bool data type pre-calculation + if (isNull((char *)&lastVal, type)) { + lastKey = primaryKey[i]; + lastVal = data[i]; + } else { + *wsum = lastVal * (primaryKey[i] - lastKey); + lastKey = primaryKey[i]; + lastVal = data[i]; + } + } + } +} + +static void getStatics_i16(int64_t *primaryKey, int16_t *data, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int64_t *wsum, int32_t *numOfNull) { + *min = INT64_MAX; + *max = INT64_MIN; + *wsum = 0; + + int64_t lastKey = 0; + int16_t lastVal = TSDB_DATA_SMALLINT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull(&data[i], TSDB_DATA_TYPE_SMALLINT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + } + + if (*max < data[i]) { + *max = data[i]; + } + + if (isNull(&lastVal, TSDB_DATA_TYPE_SMALLINT)) { + lastKey = primaryKey[i]; + lastVal = data[i]; + } else { + *wsum = lastVal * (primaryKey[i] - lastKey); + lastKey = primaryKey[i]; + lastVal = data[i]; + } + } +} + +static void getStatics_i32(int64_t *primaryKey, int32_t *data, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int64_t *wsum, int32_t *numOfNull) { + *min = INT64_MAX; + *max = INT64_MIN; + *wsum = 0; + + int64_t lastKey = 0; + int32_t lastVal = TSDB_DATA_INT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull(&data[i], TSDB_DATA_TYPE_INT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + } + + if (*max < data[i]) { + *max = data[i]; + } + + if (isNull(&lastVal, TSDB_DATA_TYPE_INT)) { + lastKey = primaryKey[i]; + lastVal = data[i]; + } else { + *wsum = lastVal * (primaryKey[i] - lastKey); + lastKey = primaryKey[i]; + lastVal = data[i]; + } + } +} + +static void getStatics_i64(int64_t *primaryKey, int64_t *data, int32_t numOfRow, int64_t *min, int64_t *max, + int64_t *sum, int64_t *wsum, int32_t *numOfNull) { + *min = INT64_MAX; + *max = INT64_MIN; + *wsum = 0; + + int64_t lastKey = 0; + int64_t lastVal = TSDB_DATA_BIGINT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull(&data[i], TSDB_DATA_TYPE_BIGINT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + } + + if (*max < data[i]) { + *max = data[i]; + } + + if (isNull(&lastVal, TSDB_DATA_TYPE_BIGINT)) { + lastKey = primaryKey[i]; + lastVal = data[i]; + } else { + *wsum = lastVal * (primaryKey[i] - lastKey); + lastKey = primaryKey[i]; + lastVal = data[i]; + } + } +} + +static void getStatics_f(int64_t *primaryKey, float *data, int32_t numOfRow, double *min, double *max, double *sum, + double *wsum, int32_t *numOfNull) { + *min = DBL_MAX; + *max = -DBL_MAX; + *wsum = 0; + + int64_t lastKey = 0; + float lastVal = TSDB_DATA_FLOAT_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull(&data[i], TSDB_DATA_TYPE_FLOAT)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > 
data[i]) { + *min = data[i]; + } + + if (*max < data[i]) { + *max = data[i]; + } + + if (isNull(&lastVal, TSDB_DATA_TYPE_FLOAT)) { + lastKey = primaryKey[i]; + lastVal = data[i]; + } else { + *wsum = lastVal * (primaryKey[i] - lastKey); + lastKey = primaryKey[i]; + lastVal = data[i]; + } + } +} + +static void getStatics_d(int64_t *primaryKey, double *data, int32_t numOfRow, double *min, double *max, double *sum, + double *wsum, int32_t *numOfNull) { + *min = DBL_MAX; + *max = -DBL_MAX; + *wsum = 0; + + int64_t lastKey = 0; + double lastVal = TSDB_DATA_DOUBLE_NULL; + + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull(&data[i], TSDB_DATA_TYPE_DOUBLE)) { + (*numOfNull) += 1; + continue; + } + + *sum += data[i]; + if (*min > data[i]) { + *min = data[i]; + } + + if (*max < data[i]) { + *max = data[i]; + } + + if (isNull(&lastVal, TSDB_DATA_TYPE_DOUBLE)) { + lastKey = primaryKey[i]; + lastVal = data[i]; + } else { + *wsum = lastVal * (primaryKey[i] - lastKey); + lastKey = primaryKey[i]; + lastVal = data[i]; + } + } +} + +void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max, + int64_t *sum, int64_t *wsum, int32_t *numOfNull) { + int64_t *primaryKey = (int64_t *)priData; + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + for (int32_t i = 0; i < numOfRow; ++i) { + if (isNull(data + i * size, type)) { + (*numOfNull) += 1; + continue; + } + } + } else { + if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) { + getStatics_i8(primaryKey, type, (int8_t *)data, numOfRow, min, max, sum, wsum, numOfNull); + } else if (type == TSDB_DATA_TYPE_SMALLINT) { + getStatics_i16(primaryKey, (int16_t *)data, numOfRow, min, max, sum, wsum, numOfNull); + } else if (type == TSDB_DATA_TYPE_INT) { + getStatics_i32(primaryKey, (int32_t *)data, numOfRow, min, max, sum, wsum, numOfNull); + } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) { + getStatics_i64(primaryKey, (int64_t *)data, numOfRow, min, max, sum, wsum, numOfNull); + } else if (type == TSDB_DATA_TYPE_DOUBLE) { + getStatics_d(primaryKey, (double *)data, numOfRow, min, max, sum, wsum, numOfNull); + } else if (type == TSDB_DATA_TYPE_FLOAT) { + getStatics_f(primaryKey, (float *)data, numOfRow, min, max, sum, wsum, numOfNull); + } + } +} + +void wavg_function_setup(SQLFunctionCtx *pCtx) { + memset(pCtx->aOutputBuf, 0, pCtx->outputBytes); + + pCtx->intermediateBuf[1].nType = TSDB_DATA_TYPE_TIMESTAMP; + pCtx->intermediateBuf[2].nType = -1; + + INIT_VAL(pCtx); +} + +bool wavg_function(SQLFunctionCtx *pCtx) { + void * pData = GET_INPUT_CHAR(pCtx); + int64_t *primaryKey = pCtx->ptsList; + assert(IS_DATA_BLOCK_LOADED(pCtx->blockStatus)); + /* assert(IS_INTER_BLOCK(pCtx->blockStatus)); */ + + int32_t notNullElems = 0; + + if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + int64_t *retVal = pCtx->aOutputBuf; + int32_t *pDb = (int32_t *)pData; + int32_t i = 0; + + // Start diff in the block + for (; i < pCtx->size; ++i) { + assert(primaryKey[i] >= pCtx->intermediateBuf[1].i64Key); + + if (isNull(&pDb[i], TSDB_DATA_TYPE_INT)) continue; + + if (pCtx->intermediateBuf[2].nType == -1) { + pCtx->intermediateBuf[2].i64Key = pDb[i]; + pCtx->intermediateBuf[2].nType = pCtx->inputType; + pCtx->intermediateBuf[1].i64Key = pCtx->nStartQueryTimestamp; + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + } else { + *retVal += pCtx->intermediateBuf[2].i64Key * 
(primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + + break; + } + + /* if (IS_INTER_BLOCK(pCtx->blockStatus)) { */ + /* *retVal += pCtx->preAggVals.wsum; */ + /* } */ + + for (++i; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_INT)) continue; + + notNullElems++; + /* if (!IS_INTER_BLOCK(pCtx->blockStatus)) { */ + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + /* } */ + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + int64_t *retVal = pCtx->aOutputBuf; + int64_t *pDb = (int64_t *)pData; + int32_t i = 0; + + // Start diff in the block + for (; i < pCtx->size; ++i) { + assert(primaryKey[i] >= pCtx->intermediateBuf[1].i64Key); + + if (isNull(&pDb[i], TSDB_DATA_TYPE_BIGINT)) continue; + + if (pCtx->intermediateBuf[2].nType == -1) { + pCtx->intermediateBuf[2].i64Key = pDb[i]; + pCtx->intermediateBuf[2].nType = pCtx->inputType; + pCtx->intermediateBuf[1].i64Key = pCtx->nStartQueryTimestamp; + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + } else { + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + + break; + } + + /* if (IS_INTER_BLOCK(pCtx->blockStatus)) { */ + /* *retVal += pCtx->preAggVals.wsum; */ + /* } */ + + for (++i; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_BIGINT)) continue; + + notNullElems++; + /* if (!IS_INTER_BLOCK(pCtx->blockStatus)) { */ + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + /* } */ + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + double *retVal = pCtx->aOutputBuf; + double *pDb = (double *)pData; + int32_t i = 0; + + // Start diff in the block + for (; i < pCtx->size; ++i) { + assert(primaryKey[i] >= pCtx->intermediateBuf[1].i64Key); + + if (isNull(&pDb[i], TSDB_DATA_TYPE_DOUBLE)) continue; + + if (pCtx->intermediateBuf[2].nType == -1) { + pCtx->intermediateBuf[2].dKey = pDb[i]; + pCtx->intermediateBuf[2].nType = pCtx->inputType; + pCtx->intermediateBuf[1].i64Key = pCtx->nStartQueryTimestamp; + *retVal += pCtx->intermediateBuf[2].dKey * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + } else { + *retVal += pCtx->intermediateBuf[2].dKey * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].dKey = pDb[i]; + } + + break; + } + + /* if (IS_INTER_BLOCK(pCtx->blockStatus)) { */ + /* *retVal += *(double *)(&(pCtx->preAggVals.wsum)); */ + /* } */ + + for (++i; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_DOUBLE)) continue; + + notNullElems++; + *retVal += pCtx->intermediateBuf[2].dKey * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].dKey = pDb[i]; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + double *retVal = pCtx->aOutputBuf; + float * pDb = (float *)pData; + int32_t i = 0; + + // Start diff in the block + for (; i < pCtx->size; ++i) { + 
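// Handle the first non-null value of this block: initialize or extend the running
+ // (last timestamp, last value) pair kept in intermediateBuf[1]/intermediateBuf[2],
+ // accumulate its time-weighted contribution into the output, then break out of this loop. +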
assert(primaryKey[i] >= pCtx->intermediateBuf[1].i64Key); + + if (isNull(&pDb[i], TSDB_DATA_TYPE_FLOAT)) continue; + + if (pCtx->intermediateBuf[2].nType == -1) { + pCtx->intermediateBuf[2].dKey = pDb[i]; + pCtx->intermediateBuf[2].nType = pCtx->inputType; + pCtx->intermediateBuf[1].i64Key = pCtx->nStartQueryTimestamp; + *retVal += pCtx->intermediateBuf[2].dKey * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + } else { + *retVal += pCtx->intermediateBuf[2].dKey * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].dKey = pDb[i]; + } + + break; + } + + /* if (IS_INTER_BLOCK(pCtx->blockStatus)) { */ + /* *retVal += *(double *)(&(pCtx->preAggVals.wsum)); */ + /* } */ + + for (++i; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_FLOAT)) continue; + + notNullElems++; + *retVal += pCtx->intermediateBuf[2].dKey * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].dKey = pDb[i]; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + int64_t *retVal = pCtx->aOutputBuf; + int16_t *pDb = (int16_t *)pData; + int32_t i = 0; + + // Start diff in the block + for (; i < pCtx->size; ++i) { + assert(primaryKey[i] >= pCtx->intermediateBuf[1].i64Key); + + if (isNull(&pDb[i], TSDB_DATA_TYPE_SMALLINT)) continue; + + if (pCtx->intermediateBuf[2].nType == -1) { + pCtx->intermediateBuf[2].i64Key = pDb[i]; + pCtx->intermediateBuf[2].nType = pCtx->inputType; + pCtx->intermediateBuf[1].i64Key = pCtx->nStartQueryTimestamp; + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + } else { + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + + break; + } + + for (++i; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_SMALLINT)) continue; + + notNullElems++; + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + int64_t *retVal = pCtx->aOutputBuf; + int8_t * pDb = (int8_t *)pData; + int32_t i = 0; + + // Start diff in the block + for (; i < pCtx->size; ++i) { + assert(primaryKey[i] >= pCtx->intermediateBuf[1].i64Key); + + if (isNull((char *)&pDb[i], TSDB_DATA_TYPE_TINYINT)) continue; + + if (pCtx->intermediateBuf[2].nType == -1) { + pCtx->intermediateBuf[2].i64Key = pDb[i]; + pCtx->intermediateBuf[2].nType = pCtx->inputType; + pCtx->intermediateBuf[1].i64Key = pCtx->nStartQueryTimestamp; + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + } else { + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + + break; + } + + for (++i; i < pCtx->size; i++) { + if (isNull((char *)&pDb[i], TSDB_DATA_TYPE_TINYINT)) continue; + + notNullElems++; + *retVal += pCtx->intermediateBuf[2].i64Key * (primaryKey[i] - pCtx->intermediateBuf[1].i64Key); + pCtx->intermediateBuf[1].i64Key = primaryKey[i]; + 
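// the current row becomes the left endpoint of the next interval (timestamp updated above, value below) +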
pCtx->intermediateBuf[2].i64Key = pDb[i]; + } + } + + pCtx->numOfIteratedElems += notNullElems; + + return true; +} + +bool wavg_function_f(SQLFunctionCtx *pCtx, int32_t index) { + // TODO : + return false; +} +void wavg_function_finalize(SQLFunctionCtx *pCtx) { + if (pCtx->intermediateBuf[2].nType == -1) { + *((double *)(pCtx->aOutputBuf)) = TSDB_DATA_DOUBLE_NULL; + SET_VAL(pCtx, 0, 0); + return; + } + + assert(pCtx->intermediateBuf[3].i64Key >= pCtx->intermediateBuf[1].i64Key); + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + int64_t *retVal = pCtx->aOutputBuf; + *retVal += pCtx->intermediateBuf[2].i64Key * (pCtx->intermediateBuf[3].i64Key - pCtx->intermediateBuf[1].i64Key); + *(double *)pCtx->aOutputBuf = (*retVal) / (double)(pCtx->intermediateBuf[3].i64Key - pCtx->nStartQueryTimestamp); + } else { + double *retVal = pCtx->aOutputBuf; + *retVal += pCtx->intermediateBuf[2].dKey * (pCtx->intermediateBuf[3].i64Key - pCtx->intermediateBuf[1].i64Key); + *retVal = *retVal / (pCtx->intermediateBuf[3].i64Key - pCtx->nStartQueryTimestamp); + } + SET_VAL(pCtx, 1, 1); +} + +static bool wavg_dist_function(SQLFunctionCtx *pCtx) { + void * pData = GET_INPUT_CHAR(pCtx); + int64_t *primaryKey = pCtx->ptsList; + assert(IS_DATA_BLOCK_LOADED(pCtx->blockStatus)); + + SWavgRuntime *output = pCtx->aOutputBuf; + + output->type = pCtx->inputType; + int32_t notNullElems = 0; + + if (pCtx->inputType == TSDB_DATA_TYPE_INT) { + int32_t *pDb = (int32_t *)pData; + + for (int32_t i = 0; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_INT)) continue; + + notNullElems++; + SET_HAS_DATA_FLAG(output->valFlag); + + output->iOutput += output->iLastValue * (primaryKey[i] - output->lastKey); + output->lastKey = primaryKey[i]; + output->iLastValue = pDb[i]; + } + + } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { + int64_t *pDb = (int64_t *)pData; + + for (int32_t i = 0; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_BIGINT)) continue; + + notNullElems++; + SET_HAS_DATA_FLAG(output->valFlag); + + output->iOutput += output->iLastValue * (primaryKey[i] - output->lastKey); + output->lastKey = primaryKey[i]; + output->iLastValue = pDb[i]; + } + + } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { + double *pDb = (double *)pData; + + for (int32_t i = 0; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_BIGINT)) continue; + + notNullElems++; + SET_HAS_DATA_FLAG(output->valFlag); + + output->dOutput += output->dLastValue * (primaryKey[i] - output->lastKey); + output->lastKey = primaryKey[i]; + output->dLastValue = pDb[i]; + } + + } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { + float *pDb = (float *)pData; + + for (int32_t i = 0; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_FLOAT)) continue; + + notNullElems++; + SET_HAS_DATA_FLAG(output->valFlag); + + output->dOutput += output->dLastValue * (primaryKey[i] - output->lastKey); + output->lastKey = primaryKey[i]; + output->dLastValue = pDb[i]; + } + + } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { + int16_t *pDb = (int16_t *)pData; + + for (int32_t i = 0; i < pCtx->size; i++) { + if (isNull(&pDb[i], TSDB_DATA_TYPE_SMALLINT)) continue; + + notNullElems++; + SET_HAS_DATA_FLAG(output->valFlag); + + output->iOutput += output->iLastValue * (primaryKey[i] - output->lastKey); + output->lastKey = primaryKey[i]; + output->iLastValue = pDb[i]; + } + } else if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { + int8_t *pDb = (int8_t *)pData; + + for (int32_t i = 0; i < 
pCtx->size; i++) { + if (isNull((char *)&pDb[i], TSDB_DATA_TYPE_TINYINT)) continue; + + notNullElems++; + SET_HAS_DATA_FLAG(output->valFlag); + + output->iOutput += output->iLastValue * (primaryKey[i] - output->lastKey); + output->lastKey = primaryKey[i]; + output->iLastValue = pDb[i]; + } + } + + SET_VAL(pCtx, notNullElems, 1); + return true; +} + +static bool wavg_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { return false; } + +static void wavg_dist_merge(SQLFunctionCtx *pCtx) { + SWavgRuntime *pBuf = (SWavgRuntime *)pCtx->aOutputBuf; + char * indicator = pCtx->aInputElemBuf; + + int32_t numOfNotNull = 0; + for (int32_t i = 0; i < pCtx->size; ++i) { + SWavgRuntime *pInput = indicator; + + if (!HAS_DATA_FLAG(pInput->valFlag)) { + indicator += sizeof(SWavgRuntime); + continue; + } + + numOfNotNull++; + if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { + pBuf->iOutput += pInput->iOutput; + } else { + pBuf->dOutput += pInput->dOutput; + } + + pBuf->sKey = pInput->sKey; + pBuf->eKey = pInput->eKey; + pBuf->lastKey = pInput->lastKey; + pBuf->iLastValue = pInput->iLastValue; + } + + SET_VAL(pCtx, numOfNotNull, 1); +} + +static void wavg_dist_second_merge(SQLFunctionCtx *pCtx) { + SWavgRuntime *pWavg = (SWavgRuntime *)pCtx->aInputElemBuf; + + if (!HAS_DATA_FLAG(pWavg->valFlag)) { + *((int64_t *)(pCtx->aOutputBuf)) = TSDB_DATA_DOUBLE_NULL; + SET_VAL(pCtx, 0, 0); + return; + } + + if (pWavg->type >= TSDB_DATA_TYPE_TINYINT && pWavg->type <= TSDB_DATA_TYPE_BIGINT) { + *(double *)pCtx->aOutputBuf = + (pWavg->iOutput + pWavg->iLastValue * (pWavg->eKey - pWavg->lastKey)) / (double)(pWavg->eKey - pWavg->sKey); + } else { + *(double *)pCtx->aOutputBuf = + (pWavg->dOutput + pWavg->dLastValue * (pWavg->eKey - pWavg->lastKey)) / (pWavg->eKey - pWavg->sKey); + } + SET_VAL(pCtx, 1, 1); +} + +/** + * param[1]: default value/previous value of specified timestamp + * param[2]: next value of specified timestamp + * param[3]: denotes if the result is a precious result or interpolation results + * + * intermediate[0]: interpolation type + * intermediate[1]: precious specified timestamp, the pCtx->startTimetamp is changed during query to satisfy the query procedure + * intermediate[2]: flag that denotes if it is a primary timestamp column or not + * intermediate[3]: tags. reserved for tags, the function is available for stable query, so the intermediate[3] must be reserved. + * + * @param pCtx + */ +static bool interp_function(SQLFunctionCtx *pCtx) { + /* at this point, the value is existed, return directly */ + if (pCtx->param[3].i64Key == 1) { + char *pData = GET_INPUT_CHAR(pCtx); + assignVal(pCtx->aOutputBuf, pData, pCtx->inputBytes, pCtx->inputType); + } else { + /* + * use interpolation to generate the result. 
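+ * The fill strategy (none / NULL / constant value / previous value / linear) is read from intermediateBuf[0].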
+ * Note: the result of primary timestamp column uses the timestamp specified by user in the query sql + */ + assert(pCtx->param[3].i64Key == 2); + int32_t interpoType = pCtx->intermediateBuf[0].i64Key; + + if (interpoType == TSDB_INTERPO_NONE) { + /* set no output result */ + pCtx->param[3].i64Key = 0; + } else if (pCtx->intermediateBuf[2].i64Key == 1) { + *(TSKEY *)pCtx->aOutputBuf = pCtx->intermediateBuf[1].i64Key; + } else { + if (interpoType == TSDB_INTERPO_NULL) { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + } else if (interpoType == TSDB_INTERPO_SET_VALUE) { + tVariantDump(&pCtx->param[1], pCtx->aOutputBuf, pCtx->inputType); + } else if (interpoType == TSDB_INTERPO_PREV) { + if (strcmp(pCtx->param[1].pz, TSDB_DATA_NULL_STR_L) == 0) { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + goto _end; + } + + char *data = pCtx->param[1].pz; + char *pVal = NULL; + if (pCtx->param[1].nType == TSDB_DATA_TYPE_BINARY) { + pVal = strsep(&data, ","); + pVal = strsep(&data, ","); + } else { + wchar_t *token = NULL; + pVal = wcstok(data, L",", &token); + pVal = wcstok(NULL, L",", &token); + } + + if ((pCtx->outputType >= TSDB_DATA_TYPE_BOOL && pCtx->outputType <= TSDB_DATA_TYPE_BIGINT) || + pCtx->outputType == TSDB_DATA_TYPE_TIMESTAMP) { + int64_t v = strtoll(pVal, NULL, 10); + assignVal(pCtx->aOutputBuf, &v, pCtx->outputBytes, pCtx->outputType); + } else if (pCtx->outputType == TSDB_DATA_TYPE_FLOAT) { + float v = (float)strtod(pVal, NULL); + if (isNull(&v, pCtx->outputType)) { + setNull(pCtx->aOutputBuf, pCtx->inputType, pCtx->inputBytes); + } else { + assignVal(pCtx->aOutputBuf, &v, pCtx->outputBytes, pCtx->outputType); + } + } else if (pCtx->outputType == TSDB_DATA_TYPE_DOUBLE) { + double v = strtod(pVal, NULL); + if (isNull(&v, pCtx->outputType)) { + setNull(pCtx->aOutputBuf, pCtx->inputType, pCtx->inputBytes); + } else { + assignVal(pCtx->aOutputBuf, &v, pCtx->outputBytes, pCtx->outputType); + } + } else if (pCtx->outputType == TSDB_DATA_TYPE_BINARY) { + assignVal(pCtx->aOutputBuf, pVal, pCtx->outputBytes, pCtx->outputType); + } else if (pCtx->outputType == TSDB_DATA_TYPE_NCHAR) { + assignVal(pCtx->aOutputBuf, pVal, pCtx->outputBytes, pCtx->outputType); + } + + } else if (interpoType == TSDB_INTERPO_LINEAR) { + if (strcmp(pCtx->param[1].pz, TSDB_DATA_NULL_STR_L) == 0) { + setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes); + goto _end; + } + + char *data1 = pCtx->param[1].pz; + char *data2 = pCtx->param[2].pz; + + char *pTimestamp1 = strsep(&data1, ","); + char *pTimestamp2 = strsep(&data2, ","); + + char *pVal1 = strsep(&data1, ","); + char *pVal2 = strsep(&data2, ","); + + SPoint point1 = {.key = strtol(pTimestamp1, NULL, 10), .val = &pCtx->param[1].i64Key}; + SPoint point2 = {.key = strtol(pTimestamp2, NULL, 10), .val = &pCtx->param[2].i64Key}; + + SPoint point = {.key = pCtx->intermediateBuf[1].i64Key, .val = pCtx->aOutputBuf}; + + int32_t srcType = pCtx->inputType; + if ((srcType >= TSDB_DATA_TYPE_TINYINT && srcType <= TSDB_DATA_TYPE_BIGINT) || + srcType == TSDB_DATA_TYPE_TIMESTAMP) { + int64_t v1 = strtol(pVal1, NULL, 10); + point1.val = &v1; + + int64_t v2 = strtol(pVal2, NULL, 10); + point2.val = &v2; + + if (isNull(&v1, srcType) || isNull(&v2, srcType)) { + setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); + } else { + taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point); + } + } else if (srcType == TSDB_DATA_TYPE_FLOAT) { + float v1 = strtod(pVal1, NULL); + point1.val = &v1; + + float v2 = strtod(pVal2, NULL); + 
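// point2 picks up the parsed value below (point1 was repointed above), so the interpolation runs on the numeric endpoints +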
point2.val = &v2; + + if (isNull(&v1, srcType) || isNull(&v2, srcType)) { + setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); + } else { + taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point); + } + } else if (srcType == TSDB_DATA_TYPE_DOUBLE) { + double v1 = strtod(pVal1, NULL); + point1.val = &v1; + + double v2 = strtod(pVal2, NULL); + point2.val = &v2; + + if (isNull(&v1, srcType) || isNull(&v2, srcType)) { + setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); + } else { + taosDoLinearInterpolation(pCtx->outputType, &point1, &point2, &point); + } + } else if (srcType == TSDB_DATA_TYPE_BOOL || srcType == TSDB_DATA_TYPE_BINARY || + srcType == TSDB_DATA_TYPE_NCHAR) { + setNull(pCtx->aOutputBuf, srcType, pCtx->inputBytes); + } + } + } + } + +_end: + pCtx->size = pCtx->param[3].i64Key; + + tVariantDestroy(&pCtx->param[1]); + tVariantDestroy(&pCtx->param[2]); + + // data in the check operation are all null, not output + SET_VAL(pCtx, pCtx->size, 1); + return false; +} + +/* + * function with the same value is compatible in selection clause + * Note: tag function, ts function is not need to check the compatible with other functions + * + * top/bottom is the last one + */ +int32_t funcCompatList[36] = { + /* count, sum, avg, min, max, stddev, percentile, apercentile, first, + last, last_row, leastsqr, */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 1, + + /* top, bottom, spread, wavg, ts, ts_dummy, tag, colprj, tagprj, + arithmetic, diff, */ + 2, 5, 1, 1, 1, 1, 1, 3, 3, 3, 4, + + /*sum_d, avg_d, min_d, max_d, first_d, last_d, last_row_d, spread_dst, + wavg_dst, top_dst, bottom_dst, */ + 1, 1, 1, 1, 1, 1, 7, 1, 1, 2, 5, + + /*apercentile_dst, interp*/ + 1, 6, +}; + +SQLAggFuncElem aAggs[36] = { + { + // 0 + "count", TSDB_FUNC_COUNT, TSDB_FUNC_COUNT, TSDB_BASE_FUNC_SO, function_setup, count_function, count_function_f, + no_next_step, noop, count_dist_merge, count_dist_merge, count_load_data_info, + }, + { + // 1 + "sum", TSDB_FUNC_SUM, TSDB_FUNC_SUM_DST, TSDB_BASE_FUNC_SO, function_setup, sum_function, sum_function_f, + no_next_step, function_finalize, noop, noop, precal_req_load_info, + }, + { + // 2 + "avg", TSDB_FUNC_AVG, TSDB_FUNC_AVG_DST, TSDB_BASE_FUNC_SO, function_setup, sum_function, sum_function_f, + no_next_step, avg_finalizer, noop, noop, precal_req_load_info, + }, + { + // 3 + "min", TSDB_FUNC_MIN, TSDB_FUNC_MIN_DST, TSDB_BASE_FUNC_SO, min_function_setup, min_function, min_function_f, + no_next_step, function_finalize, noop, noop, precal_req_load_info, + }, + { + // 4 + "max", TSDB_FUNC_MAX, TSDB_FUNC_MAX_DST, TSDB_BASE_FUNC_SO, max_function_setup, max_function, max_function_f, + no_next_step, function_finalize, noop, noop, precal_req_load_info, + }, + { + // 5 + "stddev", TSDB_FUNC_STDDEV, TSDB_FUNC_INVALID_ID, TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, + function_setup, stddev_function, stddev_function_f, stddev_next_step, stddev_finalizer, noop, noop, + data_req_load_info, + }, + { + // 6 + "percentile", TSDB_FUNC_PERCT, TSDB_FUNC_INVALID_ID, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, percentile_function_setup, percentile_function, + percentile_function_f, no_next_step, percentile_finalizer, noop, noop, data_req_load_info, + }, + { + // 7 + "apercentile", TSDB_FUNC_APERCT, TSDB_FUNC_APERCT_DST, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, function_setup, apercentile_function, + apercentile_function_f, no_next_step, apercentile_finalizer, noop, noop, data_req_load_info, + }, + { + // 8 + "first", 
TSDB_FUNC_FIRST, TSDB_FUNC_FIRST_DST, TSDB_BASE_FUNC_SO, function_setup, first_function, + first_function_f, no_next_step, function_finalize, noop, noop, first_data_req_info, + }, + { + // 9 + "last", TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST, TSDB_BASE_FUNC_SO, function_setup, last_function, last_function_f, + no_next_step, function_finalize, noop, noop, last_data_req_info, + }, + { + // 10 + "last_row", TSDB_FUNC_LAST_ROW, TSDB_FUNC_LAST_ROW_DST, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC, function_setup, interp_function, noop, + no_next_step, noop, noop, copy_function, no_data_info, + }, + { + // 11 + "leastsquares", TSDB_FUNC_LEASTSQR, TSDB_FUNC_INVALID_ID, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, leastsquares_function_setup, + leastsquares_function, leastsquares_function_f, no_next_step, leastsquare_finalizer, noop, noop, + data_req_load_info, + }, + { + // 12 + "top", TSDB_FUNC_TOP, TSDB_FUNC_TOP_DST, TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_NEED_TS, + top_bottom_function_setup, top_function, top_function_f, no_next_step, top_bottom_function_finalizer, noop, + noop, data_req_load_info, + }, + { + // 13 + "bottom", TSDB_FUNC_BOTTOM, TSDB_FUNC_BOTTOM_DST, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_NEED_TS, top_bottom_function_setup, bottom_function, + bottom_function_f, no_next_step, top_bottom_function_finalizer, noop, noop, data_req_load_info, + }, + { + // 14 + "spread", TSDB_FUNC_SPREAD, TSDB_FUNC_SPREAD_DST, TSDB_BASE_FUNC_SO, spread_function_setup, spread_function, + spread_function_f, no_next_step, spread_function_finalize, noop, noop, count_load_data_info, + }, + { + // 15 + "wavg", TSDB_FUNC_WAVG, TSDB_FUNC_WAVG_DST, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, wavg_function_setup, + wavg_function, wavg_function_f, no_next_step, wavg_function_finalize, noop, noop, data_req_load_info, + }, + { + // 16 + "ts", TSDB_FUNC_TS, TSDB_FUNC_TS, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, + date_col_output_function, date_col_output_function, no_next_step, noop, copy_function, copy_function, + no_data_info, + }, + { + // 17 + "ts", TSDB_FUNC_TS_DUMMY, TSDB_FUNC_TS_DUMMY, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, noop, + noop, no_next_step, noop, copy_function, copy_function, no_data_info, + }, + { + // 18 + "tag", TSDB_FUNC_TAG, TSDB_FUNC_TAG, TSDB_BASE_FUNC_SO, function_setup, tag_function, tag_function_f, + no_next_step, noop, copy_function, copy_function, no_data_info, + }, + // column project sql function + { + // 19 + "colprj", TSDB_FUNC_PRJ, TSDB_FUNC_PRJ, TSDB_BASE_FUNC_MO | TSDB_FUNCSTATE_NEED_TS, function_setup, + col_project_function, col_project_function_f, no_next_step, noop, copy_function, copy_function, + data_req_load_info, + }, + { + // 20 + "tagprj", TSDB_FUNC_TAGPRJ, TSDB_FUNC_TAGPRJ, + TSDB_BASE_FUNC_MO, // multi-output, tag function has only one result + function_setup, tag_project_function, tag_project_function_f, no_next_step, noop, copy_function, copy_function, + no_data_info, + }, + { + // 21 + "arithmetic", TSDB_FUNC_ARITHM, TSDB_FUNC_ARITHM, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, function_setup, arithmetic_function, + arithmetic_function_f, no_next_step, noop, copy_function, copy_function, data_req_load_info, + }, + { + // 22 + "diff", TSDB_FUNC_DIFF, TSDB_FUNC_INVALID_ID, TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS, diff_function_setup, + diff_function, diff_function_f, no_next_step, noop, noop, noop, data_req_load_info, + }, + // 
distriubted version used in two-stage aggregation processes + { + // 23 + "sum_dst", TSDB_FUNC_SUM_DST, TSDB_FUNC_SUM_DST, TSDB_BASE_FUNC_SO, function_setup, sum_dist_intern_function, + sum_dist_intern_function_f, no_next_step, function_finalize, sum_dist_merge, sum_dist_second_merge, + precal_req_load_info, + }, + { + // 24 + "avg_dst", TSDB_FUNC_AVG_DST, TSDB_FUNC_AVG_DST, TSDB_BASE_FUNC_SO, avg_dist_function_setup, + avg_dist_intern_function, avg_dist_intern_function_f, no_next_step, avg_finalizer, avg_dist_merge, + avg_dist_second_merge, precal_req_load_info, + }, + { + // 25 + "min_dst", TSDB_FUNC_MIN_DST, TSDB_FUNC_MIN_DST, TSDB_BASE_FUNC_SO, min_function_setup, + min_dist_intern_function, min_dist_intern_function_f, no_next_step, function_finalize, min_dist_merge, + min_dist_second_merge, precal_req_load_info, + }, + { + // 26 + "max_dst", TSDB_FUNC_MAX_DST, TSDB_FUNC_MAX_DST, TSDB_BASE_FUNC_SO, max_function_setup, + max_dist_intern_function, max_dist_intern_function_f, no_next_step, function_finalize, max_dist_merge, + max_dist_second_merge, precal_req_load_info, + }, + { + // 27 + "first_dist", TSDB_FUNC_FIRST_DST, TSDB_FUNC_FIRST_DST, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + function_setup, first_dist_function, first_dist_function_f, no_next_step, function_finalize, first_dist_merge, + first_dist_second_merge, first_dist_data_req_info, + }, + { + // 28 + "last_dist", TSDB_FUNC_LAST_DST, TSDB_FUNC_LAST_DST, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, + last_dist_function, last_dist_function_f, no_next_step, function_finalize, last_dist_merge, + last_dist_second_merge, last_dist_data_req_info, + }, + { + // 29 + "last_row_dist", TSDB_FUNC_LAST_ROW_DST, TSDB_FUNC_LAST_ROW_DST, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, + function_setup, last_row_dist_function, noop, no_next_step, function_finalize, noop, last_dist_second_merge, + data_req_load_info, // this function is not necessary + }, + { + // 30 + "spread_dst", TSDB_FUNC_SPREAD_DST, TSDB_FUNC_SPREAD_DST, TSDB_BASE_FUNC_SO, spread_dist_function_setup, + spread_dist_intern_function, spread_dist_intern_function_f, no_next_step, + spread_function_finalize, // no finalize + spread_dist_merge, spread_dist_second_merge, count_load_data_info, + }, + { + // 31 + "wavg_dst", TSDB_FUNC_WAVG_DST, TSDB_FUNC_WAVG_DST, TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, + wavg_dist_function, wavg_dist_function_f, no_next_step, noop, wavg_dist_merge, wavg_dist_second_merge, + data_req_load_info, + }, + { + // 32 + "top_dst", TSDB_FUNC_TOP_DST, TSDB_FUNC_TOP_DST, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, top_bottom_dist_function_setup, + top_dist_function, top_dist_function_f, no_next_step, top_bottom_function_finalizer, top_dist_merge, + top_dist_second_merge, data_req_load_info, + }, + { + // 33 + "bottom_dst", TSDB_FUNC_BOTTOM_DST, TSDB_FUNC_BOTTOM_DST, + TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, top_bottom_dist_function_setup, + bottom_dist_function, bottom_dist_function_f, no_next_step, top_bottom_function_finalizer, bottom_dist_merge, + bottom_dist_second_merge, data_req_load_info, + }, + { + // 34 + "apercentile_dst", TSDB_FUNC_APERCT_DST, TSDB_FUNC_APERCT_DST, TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_METRIC, + apercentile_dist_function_setup, apercentile_dist_intern_function, apercentile_dist_intern_function_f, + no_next_step, apercentile_finalizer, apercentile_dist_merge, apercentile_dist_second_merge, data_req_load_info, + }, + { + // 35 + "interp", 
TSDB_FUNC_INTERP, TSDB_FUNC_INTERP, + TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_NEED_TS, function_setup, + interp_function, + sum_function_f, // todo filter handle + no_next_step, noop, noop, copy_function, no_data_info, + }}; diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c new file mode 100644 index 000000000000..dcf9c468b16f --- /dev/null +++ b/src/client/src/tscLocal.c @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include "taosmsg.h" + +#include "tcache.h" +#include "tscUtil.h" +#include "tsclient.h" +#include "ttypes.h" + +#include "textbuffer.h" +#include "tscSecondaryMerge.h" +#include "tschemautil.h" +#include "tsocket.h" + +static int32_t getToStringLength(char *pData, int32_t length, int32_t type) { + char buf[512] = {0}; + + int32_t len = 0; + int32_t MAX_BOOL_TYPE_LENGTH = 5; // max(strlen("true"), strlen("false")); + switch (type) { + case TSDB_DATA_TYPE_BINARY: + return length; + case TSDB_DATA_TYPE_NCHAR: + return length; + case TSDB_DATA_TYPE_DOUBLE: + len = sprintf(buf, "%lf", *(double *)pData); + break; + case TSDB_DATA_TYPE_FLOAT: + len = sprintf(buf, "%f", *(float *)pData); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + len = sprintf(buf, "%ld", *(int64_t *)pData); + break; + case TSDB_DATA_TYPE_BOOL: + len = MAX_BOOL_TYPE_LENGTH; + break; + default: + len = sprintf(buf, "%d", *(int32_t *)pData); + break; + }; + return len; +} + +/* + * we need to convert all data into string, so we need to sprintf all kinds of + * non-string data into string, and record its length to get the right + * maximum length. The length may be less or greater than its original binary length: + * For example: + * length((short) 1) == 1, less than sizeof(short) + * length((uint64_t) 123456789011) > 12, greater than sizsof(uint64_t) + */ +static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) { + SMeterMeta *pMeta = pSql->cmd.pMeterMeta; + + if (pMeta->meterType != TSDB_METER_MTABLE) { + return 0; + } + + char * pTagValue = tsGetTagsValue(pMeta); + SSchema *pTagsSchema = tsGetTagSchema(pMeta); + + int32_t len = getToStringLength(pTagValue, pTagsSchema[0].bytes, pTagsSchema[0].type); + + pTagValue += pTagsSchema[0].bytes; + for (int32_t i = 1; i < pMeta->numOfTags; ++i) { + int32_t tLen = getToStringLength(pTagValue, pTagsSchema[i].bytes, pTagsSchema[i].type); + if (len < tLen) { + len = tLen; + } + + pTagValue += pTagsSchema[i].bytes; + } + + return len; +} + +static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { + SSqlRes *pRes = &pSql->res; + + // one column for each row + SSqlCmd * pCmd = &pSql->cmd; + SMeterMeta *pMeta = pCmd->pMeterMeta; + + /* + * tagValueCnt is to denote the number of tags columns for meter, not metric. and is to show the column data. + * for meter, which is created according to metric, the value of tagValueCnt is not 0, and the numOfTags must be 0. 
+ * for metric, the value of tagValueCnt must be 0, but the numOfTags is not 0 + */ + + int32_t numOfRows = pMeta->numOfColumns; + int32_t totalNumOfRows = numOfRows + pMeta->numOfTags; + + if (UTIL_METER_IS_METRIC(pCmd)) { + numOfRows = pMeta->numOfColumns + pMeta->numOfTags; + } + + tscInitResObjForLocalQuery(pSql, totalNumOfRows, rowLen); + SSchema *pSchema = tsGetSchema(pMeta); + + for (int32_t i = 0; i < numOfRows; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); + strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, + TSDB_COL_NAME_LEN); + + char *type = tDataTypeDesc[pSchema[i].type].aName; + + pField = tscFieldInfoGetField(pCmd, 1); + strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); + + int32_t bytes = pSchema[i].bytes; + if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes / TSDB_NCHAR_SIZE; + } + + pField = tscFieldInfoGetField(pCmd, 2); + *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes; + + pField = tscFieldInfoGetField(pCmd, 3); + if (i >= pMeta->numOfColumns && pMeta->numOfTags != 0) { + strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i, "tag", + strlen("tag") + 1); + } + } + + if (UTIL_METER_IS_METRIC(pCmd)) { + return 0; + } + + // the following is handle display tags value for meters created according to metric + char *pTagValue = tsGetTagsValue(pMeta); + for (int32_t i = numOfRows; i < totalNumOfRows; ++i) { + // field name + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0); + strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name, + TSDB_COL_NAME_LEN); + + // type name + pField = tscFieldInfoGetField(pCmd, 1); + char *type = tDataTypeDesc[pSchema[i].type].aName; + strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes); + + // type length + int32_t bytes = pSchema[i].bytes; + pField = tscFieldInfoGetField(pCmd, 2); + if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes / TSDB_NCHAR_SIZE; + } + + *(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes; + + // tag value + pField = tscFieldInfoGetField(pCmd, 3); + char *target = pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i; + + if (isNull(pTagValue, pSchema[i].type)) { + sprintf(target, "%s ", TSDB_DATA_NULL_STR); + } else { + switch (pSchema[i].type) { + case TSDB_DATA_TYPE_BINARY: + /* binary are not null-terminated string */ + strncpy(target, pTagValue, pSchema[i].bytes); + break; + case TSDB_DATA_TYPE_NCHAR: + taosUcs4ToMbs(pTagValue, pSchema[i].bytes, target); + break; + case TSDB_DATA_TYPE_FLOAT: + sprintf(target, "%f", *(float *)pTagValue); + break; + case TSDB_DATA_TYPE_DOUBLE: + sprintf(target, "%lf", *(double *)pTagValue); + break; + case TSDB_DATA_TYPE_TINYINT: + sprintf(target, "%d", *(int8_t *)pTagValue); + break; + case TSDB_DATA_TYPE_SMALLINT: + sprintf(target, "%d", *(int16_t *)pTagValue); + break; + case TSDB_DATA_TYPE_INT: + sprintf(target, "%d", *(int32_t *)pTagValue); + break; + case TSDB_DATA_TYPE_BIGINT: + sprintf(target, "%ld", *(int64_t *)pTagValue); + break; + case TSDB_DATA_TYPE_BOOL: { + char *val = (*((int8_t *)pTagValue) == 0) ? 
"false" : "true"; + sprintf(target, "%s", val); + break; + } + default: + break; + } + } + + pTagValue += pSchema[i].bytes; + } + + return 0; +} + +static int32_t tscBuildMeterSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, int32_t typeColLength, + int32_t noteColLength) { + int32_t rowLen = 0; + SSqlCmd *pCmd = &pSql->cmd; + pCmd->numOfCols = numOfCols; + + pCmd->order.order = TSQL_SO_ASC; + + tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN); + rowLen += TSDB_COL_NAME_LEN; + + tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength); + rowLen += typeColLength; + + tscFieldInfoSetValue(&pCmd->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t)); + rowLen += sizeof(int32_t); + + tscFieldInfoSetValue(&pCmd->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength); + rowLen += noteColLength; + + return rowLen; +} + +static int32_t tscProcessDescribeTable(SSqlObj *pSql) { + assert(pSql->cmd.pMeterMeta != NULL); + + const int32_t NUM_OF_DESCRIBE_TABLE_COLUMNS = 4; + const int32_t TYPE_COLUMN_LENGTH = 16; + const int32_t NOTE_COLUMN_MIN_LENGTH = 8; + + int32_t note_field_length = tscMaxLengthOfTagsFields(pSql); + if (note_field_length == 0) { + note_field_length = NOTE_COLUMN_MIN_LENGTH; + } + + int32_t rowLen = + tscBuildMeterSchemaResultFields(pSql, NUM_OF_DESCRIBE_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, note_field_length); + tscFieldInfoCalOffset(&pSql->cmd); + return tscSetValueToResObj(pSql, rowLen); +} + +// todo add order support +static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) { + // the result structure has been completed in sql parse, so we + // only need to reorganize the results in the column format + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + SMetricMeta *pMetricMeta = pCmd->pMetricMeta; + SSchema * pSchema = tsGetTagSchema(pCmd->pMeterMeta); + + int32_t vOffset[TSDB_MAX_COLUMNS] = {0}; + for (int32_t f = 1; f < pCmd->numOfReqTags; ++f) { + int16_t tagColumnIndex = pCmd->tagColumnIndex[f - 1]; + if (tagColumnIndex == -1) { + vOffset[f] = vOffset[f - 1] + TSDB_METER_NAME_LEN; + } else { + vOffset[f] = vOffset[f - 1] + pSchema[tagColumnIndex].bytes; + } + } + + int32_t totalNumOfResults = pMetricMeta->numOfMeters; + int32_t rowLen = tscGetResRowLength(pCmd); + + tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen); + + int32_t rowIdx = 0; + for (int32_t i = 0; i < pMetricMeta->numOfVnodes; ++i) { + SVnodeSidList *pSidList = (SVnodeSidList *)((char *)pMetricMeta + pMetricMeta->list[i]); + + for (int32_t j = 0; j < pSidList->numOfSids; ++j) { + SMeterSidExtInfo *pSidExt = tscGetMeterSidInfo(pSidList, j); + + for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + SColIndex *pColIndex = &tscSqlExprGet(pCmd, k)->colInfo; + int32_t offsetId = pColIndex->colIdx; + + assert(pColIndex->isTag); + + char * val = pSidExt->tags + vOffset[offsetId]; + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k); + + memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, k) * totalNumOfResults + pField->bytes * rowIdx, val, + (size_t)pField->bytes); + } + rowIdx++; + } + } + + return 0; +} + +static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + SMetricMeta *pMetricMeta = pCmd->pMetricMeta; + int32_t totalNumOfResults = 1; // count function only produce one result + int32_t rowLen = tscGetResRowLength(pCmd); + + tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen); + + int32_t rowIdx = 0; + for 
(int32_t i = 0; i < totalNumOfResults; ++i) { + for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + + if (pExpr->colInfo.colIdx == -1 && pExpr->sqlFuncId == TSDB_FUNC_COUNT) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k); + + memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, i) * totalNumOfResults + pField->bytes * rowIdx, + &pMetricMeta->numOfMeters, sizeof(pMetricMeta->numOfMeters)); + } else { + tscError("not support operations"); + continue; + } + } + rowIdx++; + } + + return 0; +} + +static int tscProcessQueryTags(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + + SMeterMeta *pMeterMeta = pCmd->pMeterMeta; + if (pMeterMeta == NULL || pMeterMeta->numOfTags == 0 || pMeterMeta->numOfColumns == 0) { + strcpy(pCmd->payload, "invalid table"); + pSql->res.code = TSDB_CODE_INVALID_TABLE; + return pSql->res.code; + } + + SSqlExpr *pExpr = tscSqlExprGet(pCmd, 0); + if (pExpr->sqlFuncId == TSDB_FUNC_COUNT) { + return tscBuildMetricTagSqlFunctionResult(pSql); + } else { + return tscBuildMetricTagProjectionResult(pSql); + } +} + +int tscProcessLocalCmd(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + + if (pCmd->command == TSDB_SQL_CFG_LOCAL) { + pSql->res.code = (uint8_t)tsCfgDynamicOptions(pCmd->payload); + } else if (pCmd->command == TSDB_SQL_DESCRIBE_TABLE) { + pSql->res.code = (uint8_t)tscProcessDescribeTable(pSql); + } else if (pCmd->command == TSDB_SQL_RETRIEVE_TAGS) { + pSql->res.code = (uint8_t)tscProcessQueryTags(pSql); + } else if (pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { + pSql->res.qhandle = 0x1; // pass the qhandle check + pSql->res.numOfRows = 0; + } else if (pCmd->command == TSDB_SQL_RESET_CACHE) { + taosClearDataCache(tscCacheHandle); + } else { + pSql->res.code = TSDB_CODE_INVALID_SQL; + tscError("%p not support command:%d", pSql, pCmd->command); + } + + //keep the code in local variable in order to avoid invalid read in case of async query + int32_t code = pSql->res.code; + + if (pSql->fp != NULL) { // callback function + if (code == 0) { + (*pSql->fp)(pSql->param, pSql, 0); + } else { + tscQueueAsyncRes(pSql); + } + } + + return code; +} diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c new file mode 100644 index 000000000000..1951b1249db4 --- /dev/null +++ b/src/client/src/tscParseInsert.c @@ -0,0 +1,1195 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE /* See feature_test_macros(7) */ +#define _GNU_SOURCE + +#define _XOPEN_SOURCE + +#pragma GCC diagnostic ignored "-Woverflow" + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ihash.h" +#include "tscSecondaryMerge.h" +#include "tscUtil.h" +#include "tschemautil.h" +#include "tsclient.h" +#include "tsqldef.h" +#include "ttypes.h" + +#include "tlog.h" +#include "tstoken.h" +#include "ttime.h" + +#define INVALID_SQL_RET_MSG(p, ...) 
\ + do { \ + sprintf(p, __VA_ARGS__); \ + return TSDB_CODE_INVALID_SQL; \ + } while (0) + +static void setErrMsg(char* msg, char* sql); +static int32_t tscAllocateMemIfNeed(SInsertedDataBlocks* pDataBlock, int32_t rowSize); + +// get formation +static int32_t getNumericType(const char* data) { + if (*data == '-' || *data == '+') { + data += 1; + } + + if (data[0] == '0') { + if (data[1] == 'x' || data[1] == 'X') { + return TK_HEX; + } else { + return TK_OCT; + } + } else { + return TK_INTEGER; + } +} + +static int64_t tscToInteger(char* data, char** endPtr) { + int32_t numType = getNumericType(data); + int32_t radix = 10; + + if (numType == TK_HEX) { + radix = 16; + } else if (numType == TK_OCT) { + radix = 8; + } + + return strtoll(data, endPtr, radix); +} + +int tsParseTime(char* value, int32_t valuelen, int64_t* time, char** next, char* error, int16_t timePrec) { + char* token; + int tokenlen; + int64_t interval; + + int64_t useconds = 0; + + char* pTokenEnd = *next; + tscGetToken(pTokenEnd, &token, &tokenlen); + if (tokenlen == 0) { + INVALID_SQL_RET_MSG(error, "missing time stamp"); + } + + if (strncmp(value, "now", 3) == 0 && valuelen == 3) { + useconds = taosGetTimestamp(timePrec); + } else if (strncmp(value, "0", 1) == 0 && valuelen == 1) { + // do nothing + } else if (value[4] != '-') { + for (int32_t i = 0; i < valuelen; ++i) { + /* + * filter illegal input. + * e.g., nw, tt, ff etc. + */ + if (value[i] < '0' || value[i] > '9') { + return TSDB_CODE_INVALID_SQL; + } + } + useconds = str2int64(value); + } else { + // strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm); + if (taosParseTime(value, time, valuelen, timePrec) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; + } + + for (int k = valuelen; value[k] != '\0'; k++) { + if (value[k] == ' ' || value[k] == '\t') continue; + if (value[k] == ',') { + *next = pTokenEnd; + *time = useconds; + return 0; + } + + break; + } + + /* + * time expression: + * e.g., now+12a, now-5h + */ + pTokenEnd = tscGetToken(pTokenEnd, &token, &tokenlen); + if (tokenlen && (*token == '+' || *token == '-')) { + pTokenEnd = tscGetToken(pTokenEnd, &value, &valuelen); + if (valuelen < 2) { + strcpy(error, "value is expected"); + return TSDB_CODE_INVALID_SQL; + } + + if (getTimestampInUsFromStr(value, valuelen, &interval) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + if (timePrec == TSDB_TIME_PRECISION_MILLI) { + interval /= 1000; + } + + if (*token == '+') { + useconds += interval; + } else { + useconds = (useconds >= interval) ? useconds - interval : 0; + } + + *next = pTokenEnd; + } + + *time = useconds; + return TSDB_CODE_SUCCESS; +} + +int32_t tsParseOneColumnData(SSchema* pSchema, char* value, int valuelen, char* payload, char* msg, char** str, + bool primaryKey, int16_t timePrec) { + int64_t temp; + int32_t nullInt = *(int32_t*)TSDB_DATA_NULL_STR_L; + char* endptr = NULL; + errno = 0; // reset global error code + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *(uint8_t*)payload = TSDB_DATA_BOOL_NULL; + } else { + if (strncmp(value, "true", valuelen) == 0) { + *(uint8_t*)payload = TSDB_TRUE; + } else if (strncmp(value, "false", valuelen) == 0) { + *(uint8_t*)payload = TSDB_FALSE; + } else { + int64_t v = strtoll(value, NULL, 10); + *(uint8_t*)payload = (int8_t)((v == 0) ? 
TSDB_FALSE : TSDB_TRUE); + } + } + break; + } + case TSDB_DATA_TYPE_TINYINT: + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *((int32_t*)payload) = TSDB_DATA_TINYINT_NULL; + } else { + int64_t v = tscToInteger(value, &endptr); + if (errno == ERANGE || v > INT8_MAX || v < INT8_MIN) { + INVALID_SQL_RET_MSG(msg, "data is overflow"); + } + + int8_t v8 = (int8_t)v; + if (isNull((char*)&v8, pSchema->type)) { + INVALID_SQL_RET_MSG(msg, "data is overflow"); + } + + *((int8_t*)payload) = v8; + } + + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *((int32_t*)payload) = TSDB_DATA_SMALLINT_NULL; + } else { + int64_t v = tscToInteger(value, &endptr); + + if (errno == ERANGE || v > INT16_MAX || v < INT16_MIN) { + INVALID_SQL_RET_MSG(msg, "data is overflow"); + } + + int16_t v16 = (int16_t)v; + if (isNull((char*)&v16, pSchema->type)) { + INVALID_SQL_RET_MSG(msg, "data is overflow"); + } + + *((int16_t*)payload) = v16; + } + break; + + case TSDB_DATA_TYPE_INT: + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *((int32_t*)payload) = TSDB_DATA_INT_NULL; + } else { + int64_t v = tscToInteger(value, &endptr); + + if (errno == ERANGE || v > INT32_MAX || v < INT32_MIN) { + INVALID_SQL_RET_MSG(msg, "data is overflow"); + } + + int32_t v32 = (int32_t)v; + if (isNull((char*)&v32, pSchema->type)) { + INVALID_SQL_RET_MSG(msg, "data is overflow"); + } + + *((int32_t*)payload) = v32; + } + + break; + + case TSDB_DATA_TYPE_BIGINT: + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *((int64_t*)payload) = TSDB_DATA_BIGINT_NULL; + } else { + int64_t v = tscToInteger(value, &endptr); + if (isNull((char*)&v, pSchema->type) || errno == ERANGE) { + INVALID_SQL_RET_MSG(msg, "data is overflow"); + } + *((int64_t*)payload) = v; + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *((int32_t*)payload) = TSDB_DATA_FLOAT_NULL; + } else { + float v = (float)strtod(value, &endptr); + if (isNull((char*)&v, pSchema->type) || isinf(v) || isnan(v)) { + *((int32_t*)payload) = TSDB_DATA_FLOAT_NULL; + } else { + *((float*)payload) = v; + } + + if (str != NULL) { + // This if statement is just for Fanuc case, when a float point number is quoted by + // quotes, we need to skip the quote. But this is temporary, it should be changed in the future. + if (*endptr == '\'' || *endptr == '\"') endptr++; + *str = endptr; + } + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *((int64_t*)payload) = TSDB_DATA_DOUBLE_NULL; + } else { + double v = strtod(value, &endptr); + if (isNull((char*)&v, pSchema->type) || isinf(v) || isnan(v)) { + *((int32_t*)payload) = TSDB_DATA_FLOAT_NULL; + } else { + *((double*)payload) = v; + } + + if (str != NULL) { + // This if statement is just for Fanuc case, when a float point number is quoted by + // quotes, we need to skip the quote. But this is temporary, it should be changed in the future. 
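+ // strtod leaves endptr at the first unparsed character, so stepping over a trailing quote lets the caller resume tokenizing right after the quoted number.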
+ if (*endptr == '\'' || *endptr == '\"') endptr++; + *str = endptr; + } + } + break; + + case TSDB_DATA_TYPE_BINARY: + // binary data cannot be null-terminated char string, otherwise the last char of the string is lost + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *payload = TSDB_DATA_BINARY_NULL; + } else { + /* truncate too long string */ + if (valuelen > pSchema->bytes) valuelen = pSchema->bytes; + strncpy(payload, value, valuelen); + } + + break; + + case TSDB_DATA_TYPE_NCHAR: + if (valuelen == 4 && nullInt == *(int32_t*)value) { + *(uint32_t*)payload = TSDB_DATA_NCHAR_NULL; + } else { + if (!taosMbsToUcs4(value, valuelen, payload, pSchema->bytes)) { + sprintf(msg, "%s", strerror(errno)); + return TSDB_CODE_INVALID_SQL; + } + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + if (valuelen == 4 && nullInt == *(int32_t*)value) { + if (primaryKey) { + *((int64_t*)payload) = 0; + } else { + *((int64_t*)payload) = TSDB_DATA_BIGINT_NULL; + } + } else { + if (tsParseTime(value, valuelen, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + *((int64_t*)payload) = temp; + } + + break; + } + } + + return 0; +} + +// todo merge the error msg function with tSQLParser +static void setErrMsg(char* msg, char* sql) { + char msgFormat[] = "near \"%s\" syntax error"; + const int32_t BACKWARD_CHAR_STEP = 15; + + // only extract part of sql string,avoid too long sql string cause stack over flow + char buf[64] = {0}; + strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1); + sprintf(msg, msgFormat, buf); +} + +int tsParseOneRowData(char** str, char* payload, SSchema schema[], SParsedDataColInfo* spd, char* error, + int16_t timePrec) { + char* value = NULL; + int valuelen = 0; + + /* 1. set the parsed value from sql string */ + int32_t rowSize = 0; + for (int i = 0; i < spd->numOfParsedCols; ++i) { + /* the start position in data block buffer of current value in sql */ + char* start = payload + spd->elems[i].offset; + int16_t colIndex = spd->elems[i].colIndex; + rowSize += schema[colIndex].bytes; + + int sign = 0; + _again: + *str = tscGetToken(*str, &value, &valuelen); + if ((valuelen == 0 && value == NULL) || (valuelen == 1 && value[0] == ')')) { + setErrMsg(error, *str); + return -1; + } + + /* support positive/negative integer/float data format */ + if ((*value == '+' || *value == '-') && valuelen == 1) { + sign = 1; + goto _again; + } + + if (sign) { + value = value - 1; + /* backward to include the +/- symbol */ + valuelen++; + } + + int32_t ret = tsParseOneColumnData(&schema[colIndex], value, valuelen, start, error, str, + colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX, timePrec); + if (ret != 0) { + return -1; // NOTE: here 0 mean error! + } + } + + /*2. 
set the null value for the rest columns */ + if (spd->numOfParsedCols < spd->numOfCols) { + char* ptr = payload; + + for (int32_t i = 0; i < spd->numOfCols; ++i) { + if (!spd->hasVal[i]) { + /* current column do not have any value to insert, set it to null */ + setNull(ptr, schema[i].type, schema[i].bytes); + } + + ptr += schema[i].bytes; + } + + rowSize = ptr - payload; + } + + return rowSize; +} + +int tsParseValues(char** str, SInsertedDataBlocks* pDataBlock, SMeterMeta* pMeterMeta, int maxRows, + SParsedDataColInfo* spd, char* error) { + char* token; + int tokenlen; + + SSchema* pSchema = tsGetSchema(pMeterMeta); + + int16_t numOfRows = 0; + pDataBlock->size += sizeof(SShellSubmitBlock); + + if (spd->hasVal[0] == false) { + sprintf(error, "primary timestamp column can not be null"); + return -1; + } + + while (1) { + char* tmp = tscGetToken(*str, &token, &tokenlen); + if (tokenlen == 0 || *token != '(') break; + + *str = tmp; + if (numOfRows >= maxRows || + pDataBlock->size + pMeterMeta->rowSize + sizeof(SShellSubmitBlock) >= pDataBlock->nAllocSize) { + maxRows += tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize); + } + + int32_t len = tsParseOneRowData(str, pDataBlock->pData + pDataBlock->size, pSchema, spd, error, + pMeterMeta->precision); + if (len <= 0) { + setErrMsg(error, *str); + return -1; + } + + pDataBlock->size += len; + + *str = tscGetToken(*str, &token, &tokenlen); + if (tokenlen == 0 || *token != ')') { + setErrMsg(error, *str); + return -1; + } + + numOfRows++; + } + + if (numOfRows <= 0) { + strcpy(error, "no any data points"); + return -1; + } else { + return numOfRows; + } +} + +static void appendDataBlock(SDataBlockList* pList, SInsertedDataBlocks* pBlocks) { + if (pList->nSize >= pList->nAlloc) { + pList->nAlloc = pList->nAlloc << 1; + pList->pData = realloc(pList->pData, (size_t)pList->nAlloc); + + // reset allocated memory + memset(pList->pData + pList->nSize, 0, POINTER_BYTES * (pList->nAlloc - pList->nSize)); + } + + pList->pData[pList->nSize++] = pBlocks; +} + +static void tscSetAllColumnsHasValue(SParsedDataColInfo* spd, SSchema* pSchema, int32_t numOfCols) { + spd->numOfCols = numOfCols; + spd->numOfParsedCols = numOfCols; + + for (int32_t i = 0; i < numOfCols; ++i) { + spd->hasVal[i] = true; + spd->elems[i].colIndex = i; + + if (i > 0) { + spd->elems[i].offset = spd->elems[i - 1].offset + pSchema[i - 1].bytes; + } + } +} + +int32_t tscAllocateMemIfNeed(SInsertedDataBlocks* pDataBlock, int32_t rowSize) { + size_t remain = pDataBlock->nAllocSize - pDataBlock->size; + + // expand the allocated size + if (remain <= sizeof(SShellSubmitBlock) + rowSize) { + int32_t oldSize = pDataBlock->nAllocSize; + + pDataBlock->nAllocSize = (uint32_t)(oldSize * 1.5); + + char* tmp = realloc(pDataBlock->pData, (size_t)pDataBlock->nAllocSize); + if (tmp != NULL) { + pDataBlock->pData = tmp; + } else { + // do nothing + } + } + + return (int32_t)(pDataBlock->nAllocSize - pDataBlock->size - sizeof(SShellSubmitBlock)) / rowSize; +} + +void tsSetBlockInfo(SShellSubmitBlock* pBlocks, const SMeterMeta* pMeterMeta, int32_t numOfRows) { + pBlocks->sid = htonl(pMeterMeta->sid); + pBlocks->uid = htobe64(pMeterMeta->uid); + pBlocks->sversion = htonl(pMeterMeta->sversion); + pBlocks->numOfRows = htons(numOfRows); +} + +static int32_t doParseInsertStatement(SSqlCmd* pCmd, SSqlRes* pRes, void* pDataBlockHashList, char** str, + SParsedDataColInfo* spd) { + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + int32_t numOfRows = 0; + + SInsertedDataBlocks** pData = 
(SInsertedDataBlocks**)taosGetIntHashData(pDataBlockHashList, pMeterMeta->vgid); + SInsertedDataBlocks* dataBuf = NULL; + + /* no data in hash list */ + if (pData == NULL) { + dataBuf = tscCreateDataBlock(TSDB_PAYLOAD_SIZE); + + /* here we only keep the pointer of chunk of buffer, not the whole buffer */ + dataBuf = *(SInsertedDataBlocks**)taosAddIntHash(pDataBlockHashList, pCmd->pMeterMeta->vgid, (char*)&dataBuf); + + dataBuf->size = tsInsertHeadSize; + strncpy(dataBuf->meterId, pCmd->name, tListLen(pCmd->name)); + appendDataBlock(pCmd->pDataBlocks, dataBuf); + } else { + dataBuf = *pData; + } + + int32_t maxNumOfRows = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize); + int64_t startPos = dataBuf->size; + + numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload); + if (numOfRows <= 0) { + return TSDB_CODE_INVALID_SQL; + } + + SShellSubmitBlock* pBlocks = (SShellSubmitBlock*)(dataBuf->pData + startPos); + tsSetBlockInfo(pBlocks, pMeterMeta, numOfRows); + dataBuf->numOfMeters += 1; + + /* + * the value of pRes->numOfRows does not affect the true result of AFFECTED ROWS, which is + * actually returned from server. + * + * * NOTE: + * The better way is to use a local variable to store the number of rows that + * has been extracted from sql expression string, and avoid to do the invalid write check + */ + pRes->numOfRows += numOfRows; + return TSDB_CODE_SUCCESS; +} + +static int32_t tscParseSqlForCreateTableOnDemand(char** sqlstr, SSqlObj* pSql) { + char* id = NULL; + int32_t idlen = 0; + int32_t code = TSDB_CODE_SUCCESS; + + SSqlCmd* pCmd = &pSql->cmd; + char* sql = *sqlstr; + + sql = tscGetToken(sql, &id, &idlen); + + /* build the token of specified table */ + SSQLToken tableToken = {.z = id, .n = idlen, .type = TK_ID}; + + char* cstart = NULL; + char* cend = NULL; + + /* skip possibly exists column list */ + sql = tscGetToken(sql, &id, &idlen); + int32_t numOfColList = 0; + bool createTable = false; + + if (id[0] == '(' && idlen == 1) { + cstart = &id[0]; + while (1) { + sql = tscGetToken(sql, &id, &idlen); + if (id[0] == ')' && idlen == 1) { + cend = &id[0]; + break; + } + + ++numOfColList; + } + + sql = tscGetToken(sql, &id, &idlen); + } + + if (numOfColList == 0 && cstart != NULL) { + return TSDB_CODE_INVALID_SQL; + } + + if (strncmp(id, "using", idlen) == 0 && idlen == 5) { + /* create table if not exists */ + sql = tscGetToken(sql, &id, &idlen); + STagData* pTag = (STagData*)pCmd->payload; + + SSQLToken token1 = {idlen, TK_ID, id}; + setMeterID(pSql, &token1); + + strcpy(pTag->name, pSql->cmd.name); + + code = tscGetMeterMeta(pSql, pTag->name); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + char* tagVal = pTag->data; + SSchema* pTagSchema = tsGetTagSchema(pCmd->pMeterMeta); + + sql = tscGetToken(sql, &id, &idlen); + if (!(strncmp(id, "tags", idlen) == 0 && idlen == 4)) { + setErrMsg(pCmd->payload, sql); + return TSDB_CODE_INVALID_SQL; + } + + int32_t numOfTagValues = 0; + while (1) { + sql = tscGetToken(sql, &id, &idlen); + if (idlen == 0) { + break; + } else if (idlen == 1) { + if (id[0] == '(') { + continue; + } + + if (id[0] == ')') { + break; + } + + if (id[0] == '-' || id[0] == '+') { + sql = tscGetToken(sql, &id, &idlen); + + id -= 1; + idlen += 1; + } + } + + code = tsParseOneColumnData(&pTagSchema[numOfTagValues], id, idlen, tagVal, pCmd->payload, &sql, false, + pCmd->pMeterMeta->precision); + if (code != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd->payload, sql); + return TSDB_CODE_INVALID_SQL; + } + + tagVal += 
pTagSchema[numOfTagValues++].bytes; + } + + if (numOfTagValues != pCmd->pMeterMeta->numOfTags) { + setErrMsg(pCmd->payload, sql); + return TSDB_CODE_INVALID_SQL; + } + + if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd->payload, sql); + return TSDB_CODE_INVALID_SQL; + } + + int32_t ret = setMeterID(pSql, &tableToken); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + createTable = true; + code = tscGetMeterMetaEx(pSql, pSql->cmd.name, true); + } else { + if (cstart != NULL) { + sql = cstart; + } else { + sql = id; + } + code = tscGetMeterMeta(pSql, pCmd->name); + } + + int32_t len = cend - cstart + 1; + if (cstart != NULL && createTable == true) { + /* move the column list to start position of the next accessed points */ + memcpy(sql - len, cstart, len); + *sqlstr = sql - len; + } else { + *sqlstr = sql; + } + + return code; +} + +/** + * usage: insert into table1 values() () table2 values()() + * + * @param pCmd + * @param str + * @param acct + * @param db + * @param pSql + * @return + */ +int tsParseInsertStatement(SSqlCmd* pCmd, char* str, char* acct, char* db, SSqlObj* pSql) { + const int32_t RESERVED_SIZE = 1024; + + pCmd->command = TSDB_SQL_INSERT; + pCmd->isInsertFromFile = -1; + pCmd->count = 0; + + pSql->res.numOfRows = 0; + + if (!pSql->pTscObj->writeAuth) { + return TSDB_CODE_NO_RIGHTS; + } + + char* id; + int idlen; + int code = TSDB_CODE_INVALID_SQL; + + str = tscGetToken(str, &id, &idlen); + if (idlen == 0 || (strncmp(id, "into", 4) != 0 || idlen != 4)) { + INVALID_SQL_RET_MSG(pCmd->payload, "keyword INTO is expected"); + } + + if ((code = tscAllocPayloadWithSize(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) { + return code; + } + + void* pDataBlockHashList = taosInitIntHash(4, POINTER_BYTES, taosHashInt); + + pSql->cmd.pDataBlocks = tscCreateBlockArrayList(); + tscTrace("%p create data block list for submit data, %p", pSql, pSql->cmd.pDataBlocks); + + while (1) { + tscGetToken(str, &id, &idlen); + if (idlen == 0) { + if ((pSql->res.numOfRows > 0) || (1 == pCmd->isInsertFromFile)) { + break; + } else { // no data in current sql string, error + code = TSDB_CODE_INVALID_SQL; + goto _error_clean; + } + } + + SSQLToken token = {idlen, TK_ID, id}; + if ((code = setMeterID(pSql, &token)) != TSDB_CODE_SUCCESS) { + goto _error_clean; + } + + void* fp = pSql->fp; + if ((code = tscParseSqlForCreateTableOnDemand(&str, pSql)) != TSDB_CODE_SUCCESS) { + if (fp != NULL) { + goto _clean; + } else { + /* + * for async insert, the free data block operations, which is tscDestroyBlockArrayList, + * must be executed before launch another threads to get metermeta, since the + * later ops may manipulate SSqlObj through another thread in getMeterMetaCallback function. 
+ */ + goto _error_clean; + } + } + + if (UTIL_METER_IS_METRIC(pCmd)) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "insert data into metric is not supported"); + goto _error_clean; + } + + str = tscGetToken(str, &id, &idlen); + if (idlen == 0) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "keyword VALUES or FILE are required"); + goto _error_clean; + } + + if (strncmp(id, "values", 6) == 0 && idlen == 6) { + SParsedDataColInfo spd = {0}; + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + + tscSetAllColumnsHasValue(&spd, pSchema, pCmd->pMeterMeta->numOfColumns); + + if (pCmd->isInsertFromFile == -1) { + pCmd->isInsertFromFile = 0; + } else { + if (pCmd->isInsertFromFile == 1) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up"); + goto _error_clean; + } + } + + /* + * app here insert data in different vnodes, so we need to set the following + * data in another submit procedure using async insert routines + */ + code = doParseInsertStatement(pCmd, &pSql->res, pDataBlockHashList, &str, &spd); + if (code != TSDB_CODE_SUCCESS) { + goto _error_clean; + } + + } else if (strncmp(id, "file", 4) == 0 && idlen == 4) { + if (pCmd->isInsertFromFile == -1) { + pCmd->isInsertFromFile = 1; + } else { + if (pCmd->isInsertFromFile == 0) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up"); + goto _error_clean; + } + } + + str = tscGetTokenDelimiter(str, &id, &idlen, " ;"); + if (idlen == 0) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "filename is required following keyword FILE"); + goto _error_clean; + } + + // char fname[TSDB_FILENAME_LEN] = "\0"; + char* fname = malloc(idlen + 1); + memset(fname, 0, idlen + 1); + memcpy(fname, id, idlen); + wordexp_t full_path; + if (wordexp(fname, &full_path, 0) != 0) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "invalid filename"); + free(fname); + goto _error_clean; + } + + strcpy(fname, full_path.we_wordv[0]); + wordfree(&full_path); + + SInsertedDataBlocks* dataBuf = tscCreateDataBlock(strlen(fname) + sizeof(SInsertedDataBlocks) + 1); + strcpy(dataBuf->filename, fname); + + dataBuf->size = strlen(fname) + 1; + free(fname); + + strcpy(dataBuf->meterId, pCmd->name); + appendDataBlock(pCmd->pDataBlocks, dataBuf); + + str = id + idlen; + } else if (idlen == 1 && id[0] == '(') { + /* insert into tablename(col1, col2,..., coln) values(v1, v2,... 
vn); */ + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + SSchema* pSchema = tsGetSchema(pMeterMeta); + + if (pCmd->isInsertFromFile == -1) { + pCmd->isInsertFromFile = 0; + } else if (pCmd->isInsertFromFile == 1) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up"); + goto _error_clean; + } + + SParsedDataColInfo spd = { + pMeterMeta->numOfColumns, 0, {{0}}, {0}, + }; + + int16_t offset[TSDB_MAX_COLUMNS] = {0}; + for (int32_t t = 1; t < pMeterMeta->numOfColumns; ++t) { + offset[t] = offset[t - 1] + pSchema[t - 1].bytes; + } + + while (1) { + str = tscGetToken(str, &id, &idlen); + if (idlen == 1 && id[0] == ')') { + break; + } + + bool findColumnIndex = false; + + // todo speedup by using hash list + for (int32_t t = 0; t < pMeterMeta->numOfColumns; ++t) { + if (strncmp(id, pSchema[t].name, idlen) == 0 && strlen(pSchema[t].name) == idlen) { + SParsedColElem* pElem = &spd.elems[spd.numOfParsedCols++]; + pElem->offset = offset[t]; + pElem->colIndex = t; + + if (spd.hasVal[t] == true) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "duplicated column name"); + goto _error_clean; + } + + spd.hasVal[t] = true; + findColumnIndex = true; + break; + } + } + + if (!findColumnIndex) { // + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "invalid column name"); + goto _error_clean; + } + } + + if (spd.numOfParsedCols == 0 || spd.numOfParsedCols > pMeterMeta->numOfColumns) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "column name expected"); + goto _error_clean; + } + + str = tscGetToken(str, &id, &idlen); + if (strncmp(id, "values", idlen) != 0 || idlen != 6) { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "keyword VALUES is expected"); + goto _error_clean; + } + + code = doParseInsertStatement(pCmd, &pSql->res, pDataBlockHashList, &str, &spd); + if (code != TSDB_CODE_SUCCESS) { + goto _error_clean; + } + } else { + code = TSDB_CODE_INVALID_SQL; + sprintf(pCmd->payload, "keyword VALUES or FILE are required"); + goto _error_clean; + } + } + + /* submit to more than one vnode */ + if (pCmd->pDataBlocks->nSize > 0) { + // lihui: if import file, only malloc the size of file name + if (1 != pCmd->isInsertFromFile) { + tscFreeUnusedDataBlocks(pCmd->pDataBlocks); + + SInsertedDataBlocks* pDataBlock = pCmd->pDataBlocks->pData[0]; + if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) { + goto _error_clean; + } + } + + pCmd->vnodeIdx = 1; // set the next sent data vnode index in data block arraylist + } else { + tscDestroyBlockArrayList(&pCmd->pDataBlocks); + } + + code = TSDB_CODE_SUCCESS; + goto _clean; + +_error_clean: + tscDestroyBlockArrayList(&pCmd->pDataBlocks); + +_clean: + taosCleanUpIntHash(pDataBlockHashList); + return code; +} + +int tsParseImportStatement(SSqlObj* pSql, char* str, char* acct, char* db) { + SSqlCmd* pCmd = &pSql->cmd; + pCmd->order.order = TSQL_SO_ASC; + return tsParseInsertStatement(pCmd, str, acct, db, pSql); +} + +int tsParseInsertSql(SSqlObj* pSql, char* sql, char* acct, char* db) { + char* verb; + int verblen; + int code = TSDB_CODE_INVALID_SQL; + + SSqlCmd* pCmd = &pSql->cmd; + tscCleanSqlCmd(pCmd); + + sql = tscGetToken(sql, &verb, &verblen); + + if (verblen) { + if (strncmp(verb, "insert", 6) == 0 && verblen == 6) { + code = tsParseInsertStatement(pCmd, sql, acct, db, pSql); + } else if (strncmp(verb, "import", 6) == 0 && verblen == 6) { + code = tsParseImportStatement(pSql, sql, acct, db); + } else { + verb[verblen] = 0; + sprintf(pCmd->payload, 
"invalid keyword:%s", verb); + } + } else { + sprintf(pCmd->payload, "no any keywords"); + } + + // sql object has not been released in async model + if (pSql->signature == pSql) { + pSql->res.numOfRows = 0; + } + + return code; +} + +int tsParseSql(SSqlObj* pSql, char* acct, char* db, bool multiVnodeInsertion) { + int32_t ret = TSDB_CODE_SUCCESS; + + if (tscIsInsertOrImportData(pSql->sqlstr)) { + /* + * only for async multi-vnode insertion Set the fp before parse the sql string, in case of getmetermeta failed, + * in which the error handle callback function can rightfully restore the user defined function (fp) + */ + if (pSql->fp != NULL && multiVnodeInsertion) { + assert(pSql->fetchFp == NULL); + pSql->fetchFp = pSql->fp; + + /* replace user defined callback function with multi-insert proxy function*/ + pSql->fp = tscAsyncInsertMultiVnodesProxy; + } + + ret = tsParseInsertSql(pSql, pSql->sqlstr, acct, db); + } else { + SSqlInfo SQLInfo = {0}; + tSQLParse(&SQLInfo, pSql->sqlstr); + ret = tscToSQLCmd(pSql, &SQLInfo); + SQLInfoDestroy(&SQLInfo); + } + + /* + * the pRes->code may be modified or even released by another thread in tscMeterMetaCallBack + * function, so do NOT use pRes->code to determine if the getMeterMeta/getMetricMeta function + * invokes new threads to get data from mnode or simply retrieves data from cache. + * + * do NOT assign return code to pRes->code for the same reason for it may be released by another thread + * pRes->code = ret; + */ + return ret; +} + +static int tscInsertDataFromFile(SSqlObj* pSql, FILE* fp) { + // TODO : import data from file + int readLen = 0; + char* line = NULL; + size_t len = 0; + uint32_t maxRows = 0; + SSqlCmd* pCmd = &pSql->cmd; + char* pStart = pCmd->payload + tsInsertHeadSize; + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + int numOfRows = 0; + uint32_t rowSize = pMeterMeta->rowSize; + char error[128] = "\0"; + SShellSubmitBlock* pBlock = (SShellSubmitBlock*)(pStart); + pStart += sizeof(SShellSubmitBlock); + int nrows = 0; + + const int32_t RESERVED_SIZE = 1024; + + maxRows = (TSDB_PAYLOAD_SIZE - RESERVED_SIZE - sizeof(SShellSubmitBlock)) / rowSize; + if (maxRows < 1) return -1; + + int count = 0; + SParsedDataColInfo spd = {0}; + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + + tscSetAllColumnsHasValue(&spd, pSchema, pCmd->pMeterMeta->numOfColumns); + + while ((readLen = getline(&line, &len, fp)) != -1) { + // line[--readLen] = '\0'; + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0; + char* lineptr = line; + strtolower(line, line); + len = tsParseOneRowData(&lineptr, pStart, pSchema, &spd, error, pCmd->pMeterMeta->precision); + if (len <= 0) return -1; + pStart += len; + + count++; + nrows++; + if (count >= maxRows) { + pCmd->payloadLen = (pStart - pCmd->payload); + pBlock->sid = htonl(pMeterMeta->sid); + pBlock->numOfRows = htons(count); + if (tscProcessSql(pSql) != 0) { + return -1; + } + numOfRows += pSql->res.numOfRows; + count = 0; + memset(pCmd->payload, 0, TSDB_PAYLOAD_SIZE); + pStart = pCmd->payload + tsInsertHeadSize; + pBlock = (SShellSubmitBlock*)(pStart); + pStart += sizeof(SShellSubmitBlock); + } + } + + if (count > 0) { + pCmd->payloadLen = (pStart - pCmd->payload); + pBlock->sid = htonl(pMeterMeta->sid); + pBlock->numOfRows = htons(count); + if (tscProcessSql(pSql) != 0) { + return -1; + } + numOfRows += pSql->res.numOfRows; + } + + if (line) tfree(line); + return numOfRows; +} + +/* multi-vnodes insertion in sync query model + * + * modify history + * 2019.05.10 lihui + * Remove 
the code for importing records from files + */ +void tscProcessMultiVnodesInsert(SSqlObj* pSql) { + SSqlCmd* pCmd = &pSql->cmd; + if (pCmd->command != TSDB_SQL_INSERT) { + return; + } + + SInsertedDataBlocks* pDataBlock = NULL; + int32_t code = TSDB_CODE_SUCCESS; + + /* the first block has been sent to server in processSQL function */ + assert(pCmd->isInsertFromFile != -1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL); + + if (pCmd->vnodeIdx < pCmd->pDataBlocks->nSize) { + SDataBlockList* pDataBlocks = pCmd->pDataBlocks; + + for (int32_t i = pCmd->vnodeIdx; i < pDataBlocks->nSize; ++i) { + pDataBlock = pDataBlocks->pData[i]; + if (pDataBlock == NULL) { + continue; + } + + if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) { + tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx, pDataBlocks->nSize); + continue; + } + + tscProcessSql(pSql); + } + } + + // all data have been submit to vnode, release data blocks + tscDestroyBlockArrayList(&pCmd->pDataBlocks); +} + +/* multi-vnodes insertion in sync query model */ +void tscProcessMultiVnodesInsertForFile(SSqlObj* pSql) { + SSqlCmd* pCmd = &pSql->cmd; + if (pCmd->command != TSDB_SQL_INSERT) { + return; + } + + SInsertedDataBlocks* pDataBlock = NULL; + int32_t affected_rows = 0; + + assert(pCmd->isInsertFromFile == 1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL); + + SDataBlockList* pDataBlocks = pCmd->pDataBlocks; + + pCmd->isInsertFromFile = 0; // for tscProcessSql() + + pSql->res.numOfRows = 0; + for (int32_t i = 0; i < pDataBlocks->nSize; ++i) { + pDataBlock = pDataBlocks->pData[i]; + if (pDataBlock == NULL) { + continue; + } + + tscAllocPayloadWithSize(pCmd, TSDB_PAYLOAD_SIZE); + + pCmd->count = 1; + + FILE* fp = fopen(pDataBlock->filename, "r"); + if (fp == NULL) { + tscError("%p Failed to open file %s to insert data from file", pSql, pDataBlock->filename); + continue; + } + + strcpy(pCmd->name, pDataBlock->meterId); + tscGetMeterMeta(pSql, pCmd->name); + int nrows = tscInsertDataFromFile(pSql, fp); + if (nrows < 0) { + fclose(fp); + tscTrace("%p There is no record in file %s", pSql, pDataBlock->filename); + continue; + } + fclose(fp); + + affected_rows += nrows; + + tscTrace("%p Insert data %d records from file %s", pSql, nrows, pDataBlock->filename); + } + + pSql->res.numOfRows = affected_rows; + + // all data have been submit to vnode, release data blocks + tscDestroyBlockArrayList(&pCmd->pDataBlocks); +} diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c new file mode 100644 index 000000000000..019b0e4a4ce0 --- /dev/null +++ b/src/client/src/tscProfile.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include "tlog.h" +#include "tsclient.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +void tscAddIntoSqlList(SSqlObj *pSql) { + static uint32_t queryId = 1; + + STscObj *pObj = pSql->pTscObj; + if (pSql->listed) return; + + pthread_mutex_lock(&pObj->mutex); + + assert(pSql != pObj->sqlList); + pSql->next = pObj->sqlList; + if (pObj->sqlList) pObj->sqlList->prev = pSql; + pObj->sqlList = pSql; + pSql->queryId = queryId++; + + pthread_mutex_unlock(&pObj->mutex); + + pSql->stime = taosGetTimestampMs(); + pSql->listed = 1; + + tscTrace("%p added into sqlList", pSql); +} + +void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) { + if (code < 0) { + tscError("failed to save slowquery, code:%d", code); + } +} + +void tscSaveSlowQueryFp(void *handle, void *tmrId) { + char *sql = handle; + + static void *taos = NULL; + if (taos == NULL) { + taos = taos_connect(NULL, "sys", tsInternalPass, NULL, 0); + if (taos == NULL) { + tscError("failed to save slow query, can't connect to server"); + free(sql); + return; + } + } + + tscTrace("save slow query:sql", sql); + taos_query_a(taos, sql, tscSaveSlowQueryFpCb, NULL); + free(sql); +} + +void tscSaveSlowQuery(SSqlObj *pSql) { + const static int64_t SLOW_QUERY_INTERVAL = 3000000L; + if (pSql->res.useconds < SLOW_QUERY_INTERVAL) return; + + tscTrace("%p query time:%ld sql:%s", pSql, pSql->res.useconds, pSql->sqlstr); + + char *sql = malloc(200); + int len = snprintf(sql, 200, "insert into sys.slowquery values(now, '%s', %ld, %ld, '", pSql->pTscObj->user, + pSql->stime, pSql->res.useconds); + int sqlLen = snprintf(sql + len, TSDB_SHOW_SQL_LEN, "%s", pSql->sqlstr); + if (sqlLen > TSDB_SHOW_SQL_LEN - 1) { + sqlLen = len + TSDB_SHOW_SQL_LEN - 1; + } else { + sqlLen += len; + } + strcpy(sql + sqlLen, "')"); + + taosTmrStart(tscSaveSlowQueryFp, 200, sql, tscTmr); +} + +void tscRemoveFromSqlList(SSqlObj *pSql) { + STscObj *pObj = pSql->pTscObj; + if (pSql->listed == 0) return; + + pthread_mutex_lock(&pObj->mutex); + + if (pSql->prev) + pSql->prev->next = pSql->next; + else + pObj->sqlList = pSql->next; + + if (pSql->next) pSql->next->prev = pSql->prev; + + pthread_mutex_unlock(&pObj->mutex); + + pSql->next = NULL; + pSql->prev = NULL; + pSql->listed = 0; + + tscSaveSlowQuery(pSql); + tscTrace("%p removed from sqlList", pSql); +} + +void tscKillQuery(STscObj *pObj, uint32_t killId) { + pthread_mutex_lock(&pObj->mutex); + + SSqlObj *pSql = pObj->sqlList; + while (pSql) { + if (pSql->queryId == killId) break; + pSql = pSql->next; + } + + pthread_mutex_unlock(&pObj->mutex); + + if (pSql == NULL) return; + + tscTrace("%p query is killed, queryId:%d thandle:%p", pSql, killId, pSql->thandle); + taos_stop_query(pSql); +} + +void tscAddIntoStreamList(SSqlStream *pStream) { + static uint32_t streamId = 1; + STscObj * pObj = pStream->pSql->pTscObj; + + pthread_mutex_lock(&pObj->mutex); + + pStream->next = pObj->streamList; + if (pObj->streamList) pObj->streamList->prev = pStream; + pObj->streamList = pStream; + pStream->streamId = streamId++; + + pthread_mutex_unlock(&pObj->mutex); + + pStream->listed = 1; +} + +void tscRemoveFromStreamList(SSqlStream *pStream, SSqlObj *pSqlObj) { + if (pStream->listed == 0) return; + + STscObj *pObj = pSqlObj->pTscObj; + + pthread_mutex_lock(&pObj->mutex); + + if (pStream->prev) + pStream->prev->next = pStream->next; + else + pObj->streamList = pStream->next; + + if (pStream->next) pStream->next->prev = pStream->prev; + + 
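+  // the stream has been unlinked from pObj->streamList; release the lock before
+  // clearing the stream's own prev/next pointers and its listed flag below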
pthread_mutex_unlock(&pObj->mutex); + + pStream->next = NULL; + pStream->prev = NULL; + + pStream->listed = 0; +} + +void tscKillStream(STscObj *pObj, uint32_t killId) { + pthread_mutex_lock(&pObj->mutex); + + SSqlStream *pStream = pObj->streamList; + while (pStream) { + if (pStream->streamId == killId) break; + pStream = pStream->next; + } + + pthread_mutex_unlock(&pObj->mutex); + + tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); + + taos_close_stream(pStream); + if (pStream->callback) { + pStream->callback(pStream->param); + } +} + +char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) { + SQList *pQList = (SQList *)pMsg; + char * pMax = pMsg + TSDB_PAYLOAD_SIZE - 256; + + SQDesc *pQdesc = pQList->qdesc; + pQList->numOfQueries = 0; + + // We extract the lock to tscBuildHeartBeatMsg function. + /* pthread_mutex_lock (&pObj->mutex); */ + + pMsg += sizeof(SQList); + SSqlObj *pSql = pObj->sqlList; + while (pSql) { + strncpy(pQdesc->sql, pSql->sqlstr, TSDB_SHOW_SQL_LEN - 1); + pQdesc->sql[TSDB_SHOW_SQL_LEN - 1] = 0; + pQdesc->stime = pSql->stime; + pQdesc->queryId = pSql->queryId; + pQdesc->useconds = pSql->res.useconds; + + pQList->numOfQueries++; + pQdesc++; + pSql = pSql->next; + pMsg += sizeof(SQDesc); + if (pMsg > pMax) break; + } + + SSList *pSList = (SSList *)pMsg; + SSDesc *pSdesc = pSList->sdesc; + pSList->numOfStreams = 0; + + pMsg += sizeof(SSList); + SSqlStream *pStream = pObj->streamList; + while (pStream) { + strncpy(pSdesc->sql, pStream->pSql->sqlstr, TSDB_SHOW_SQL_LEN - 1); + pSdesc->sql[TSDB_SHOW_SQL_LEN - 1] = 0; + pSdesc->streamId = pStream->streamId; + pSdesc->num = pStream->num; + + pSdesc->useconds = pStream->useconds; + pSdesc->stime = pStream->stime - pStream->interval; + pSdesc->ctime = pStream->ctime; + + pSdesc->slidingTime = pStream->slidingTime; + pSdesc->interval = pStream->interval; + + pSList->numOfStreams++; + pSdesc++; + pStream = pStream->next; + pMsg += sizeof(SSDesc); + if (pMsg > pMax) break; + } + + /* pthread_mutex_unlock (&pObj->mutex); */ + + return pMsg; +} + +void tscKillConnection(STscObj *pObj) { + pthread_mutex_lock(&pObj->mutex); + + SSqlObj *pSql = pObj->sqlList; + while (pSql) { + taosStopRpcConn(pSql->thandle); + pSql = pSql->next; + } + + SSqlStream *pStream = pObj->streamList; + while (pStream) { + taos_close_stream(pStream); + pStream = pStream->next; + } + + pthread_mutex_unlock(&pObj->mutex); + + taos_close(pObj); + + tscTrace("connection:%p is killed", pObj); +} diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c new file mode 100644 index 000000000000..00c2b8b8bb01 --- /dev/null +++ b/src/client/src/tscSQLParser.c @@ -0,0 +1,3969 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _XOPEN_SOURCE +#define _BSD_SOURCE + +#include +#include +#if defined(LINUX) +#include +#endif +#include +#include +#include +#include +#include + +#include "taos.h" +#include "tstoken.h" +#include "ttime.h" + +#include "tscUtil.h" +#include "tschemautil.h" +#include "tsclient.h" +#include "tsql.h" + +typedef struct SColumnIdList { + SSchema* pSchema; + int32_t numOfCols; + int32_t numOfRecordedCols; + int32_t ids[TSDB_MAX_COLUMNS]; +} SColumnIdList; + +typedef struct SColumnList { + int32_t numOfCols; + int32_t ids[TSDB_MAX_COLUMNS]; +} SColumnList; + +static void setProjExprForMetricQuery(SSqlCmd* pCmd, int32_t fieldIDInResult, int32_t colIdx); + +static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); + +static bool has(tFieldList* pFieldList, int32_t offset, char* name); + +static char* getAccountId(SSqlObj* pSql); + +static void getCurrentDBName(SSqlObj* pSql, SSQLToken* pDBToken); +static bool hasSpecifyDB(SSQLToken* pTableName); +static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd); + +static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd); + +static int32_t setObjFullName(char* fullName, char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* len); + +static int32_t getColumnIndexByName(SSQLToken* pToken, SSchema* pSchema, int32_t numOfCols); + +static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nLen); +static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName); + +static int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem); + +static int32_t insertResultField(SSqlCmd* pCmd, int32_t fieldIDInResult, SColumnList* pIdList, int16_t bytes, + int8_t type, char* fieldName); +static int32_t changeFunctionID(int32_t optr, int16_t* pExpr); + +static void setErrMsg(SSqlCmd* pCmd, char* pzErrMsg, int32_t maxLen); + +static int32_t buildSelectionClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric); + +static bool validateIpAddress(char* ip); +static bool onlyQueryMetricTags(SSqlCmd* pCmd); +static bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd); +static bool functionCompatibleCheck(SSqlCmd* pCmd); + +static void setColumnOffsetValueInResultset(SSqlCmd* pCmd); +static int32_t setGroupByClause(SSqlCmd* pCmd, tVariantList* pList); + +static int32_t setIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql); +static int32_t setSlidingClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql); + +static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SSchema* pSchema, tSQLExprItem* pItem, bool isMet); + +static int32_t buildQueryCond(SSqlObj* pSql, tSQLExpr* pExpr); +static int32_t setFillPolicy(SSqlCmd* pCmd, SQuerySQL* pQuerySQL); +static int32_t setOrderByClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema, int32_t numOfCols); + +static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd); +static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField); +static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); +static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd); +static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString, SColumnIdList* colIdList); +static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd); +static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols); +static int32_t validateDNodeConfig(tDCLSQL* pOptions); +static int32_t validateColumnName(char* name); +static int32_t 
setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo); +static bool hasTimestampForPointInterpQuery(SSqlCmd* pCmd); +static void updateTagColumnIndex(SSqlCmd* pCmd); +static int32_t setLimitOffsetValueInfo(SSqlObj* pSql, SQuerySQL* pQuerySql); +static void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex); + +static int32_t tscQueryOnlyMetricTags(SSqlCmd* pCmd, bool* queryOnMetricTags) { + assert(pCmd->metricQuery == 1); + + // here colIdx == -1 means the special column tbname that is the name of each table + *queryOnMetricTags = true; + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + + if (pExpr->sqlFuncId != TSDB_FUNC_TAGPRJ && + !(pExpr->sqlFuncId == TSDB_FUNC_COUNT && pExpr->colInfo.colIdx == -1)) { // 23 == "tagprj" function + *queryOnMetricTags = false; + break; + } + } + + return TSDB_CODE_SUCCESS; +} + +// todo handle memory leak in error handle function +int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { + if (pInfo == NULL || pSql == NULL || pSql->signature != pSql) { + return TSDB_CODE_APP_ERROR; + } + + SSqlCmd* pCmd = &(pSql->cmd); + + if (!pInfo->validSql) { + setErrMsg(pCmd, pInfo->pzErrMsg, tListLen(pInfo->pzErrMsg)); + return TSDB_CODE_INVALID_SQL; + } + + tscCleanSqlCmd(pCmd); + tscAllocPayloadWithSize(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE); + + // transfer pInfo into select operation + switch (pInfo->sqlType) { + case DROP_TABLE: + case DROP_USER: + case DROP_ACCOUNT: + case DROP_DATABASE: { + char msg[] = "param name too long"; + char msg1[] = "invalid ip address"; + char msg2[] = "invalid name"; + + SSQLToken* pzName = &pInfo->pDCLInfo->a[0]; + if ((pInfo->sqlType != DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + if (pInfo->sqlType == DROP_DATABASE) { + assert(pInfo->pDCLInfo->nTokens == 2); + + pCmd->command = TSDB_SQL_DROP_DB; + pCmd->existsCheck = (pInfo->pDCLInfo->a[1].n == 1); + + int32_t code = setObjFullName(pCmd->name, getAccountId(pSql), pzName, NULL, NULL); + if (code != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + } + + return code; + } else if (pInfo->sqlType == DROP_TABLE) { + assert(pInfo->pDCLInfo->nTokens == 2); + + pCmd->existsCheck = (pInfo->pDCLInfo->a[1].n == 1); + pCmd->command = TSDB_SQL_DROP_TABLE; + + int32_t ret = setMeterID(pSql, pzName); + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + } + return ret; + } else { + if (pzName->n > TSDB_USER_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (pInfo->sqlType == DROP_USER) { + pCmd->command = TSDB_SQL_DROP_USER; + } + + strncpy(pCmd->name, pzName->z, pzName->n); + return TSDB_CODE_SUCCESS; + } + } + + case USE_DATABASE: { + char msg[] = "db name too long"; + pCmd->command = TSDB_SQL_USE_DB; + + SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; + if (pToken->n > TSDB_DB_NAME_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t ret = setObjFullName(pCmd->name, getAccountId(pSql), pToken, NULL, NULL); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + break; + } + + case RESET_QUERY_CACHE: { + pCmd->command = TSDB_SQL_RESET_CACHE; + break; + } + + case SHOW_DATABASES: + case SHOW_TABLES: + case SHOW_STABLES: + case SHOW_DNODES: + case SHOW_USERS: + case SHOW_VGROUPS: + case SHOW_CONNECTIONS: + case SHOW_QUERIES: + case SHOW_STREAMS: + case SHOW_SCORES: + case SHOW_CONFIGS: { + return setShowInfo(pSql, pInfo); + 
} + + case ALTER_DATABASE: + case CREATE_DATABASE: { + char msg2[] = "name too long"; + char msg3[] = "invalid db name"; + char msg4[] = "invalid time precision"; + int32_t STORAGE_LEVEL = 3; + + if (pInfo->sqlType == ALTER_DATABASE) { + pCmd->command = TSDB_SQL_ALTER_DB; + } else { + pCmd->command = TSDB_SQL_CREATE_DB; + pCmd->existsCheck = (pInfo->pDCLInfo->a[0].n == 1); + } + + SCreateDBSQL* pCreateDB = &(pInfo->pDCLInfo->dbOpt); + if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t ret = setObjFullName(pCmd->name, getAccountId(pSql), &(pCreateDB->dbname), NULL, NULL); + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return ret; + } + + /* in case of TSDB_SQL_ALTER_DB, other parameters will be ignore by mnode.*/ + pCmd->defaultVal[0] = pCreateDB->nReplica; + pCmd->defaultVal[1] = pCreateDB->nDays; + + pCmd->defaultVal[3] = pCreateDB->nRowsInFileBlock; + pCmd->defaultVal[4] = pCreateDB->nCacheBlockSize; + pCmd->defaultVal[5] = pCreateDB->numOfBlocksPerTable; + + pCmd->defaultVal[6] = pCreateDB->nTablesPerVnode; + pCmd->defaultVal[7] = pCreateDB->commitTime; + pCmd->defaultVal[8] = pCreateDB->commitLog; + pCmd->defaultVal[9] = pCreateDB->compressionLevel; + + *(double*)&(pCmd->defaultVal[10]) = pCreateDB->nCacheNumOfBlocks; + + if (pCreateDB->keep != NULL) { + pCmd->defaultVal[11] = pCreateDB->keep->nExpr; + + for (int32_t i = 0; i < STORAGE_LEVEL; ++i) { + if (i < pCreateDB->keep->nExpr) { + pCmd->defaultVal[12 + i] = pCreateDB->keep->a[i].pVar.i64Key; + } else { + pCmd->defaultVal[12 + i] = pCreateDB->keep->a[0].pVar.i64Key; + } + } + + } else { + for (int32_t i = 0; i < STORAGE_LEVEL; ++i) { + pCmd->defaultVal[12 + i] = -1; + } + } + + /* force to set 3 */ + pCmd->defaultVal[11] = 3; + const int32_t TIME_PRECISION_INDEX = 15; + + if (pCreateDB->precision.n > 0) { + if (strncmp(pCreateDB->precision.z, TSDB_TIME_PRECISION_MILLI_STR, pCreateDB->precision.n) == 0 && + strlen(TSDB_TIME_PRECISION_MILLI_STR) == pCreateDB->precision.n) { + /*time precision for this db: million second */ + pCmd->defaultVal[TIME_PRECISION_INDEX] = TSDB_TIME_PRECISION_MILLI; + } else if (strncmp(pCreateDB->precision.z, TSDB_TIME_PRECISION_MICRO_STR, pCreateDB->precision.n) == 0 && + strlen(TSDB_TIME_PRECISION_MICRO_STR) == pCreateDB->precision.n) { + pCmd->defaultVal[TIME_PRECISION_INDEX] = TSDB_TIME_PRECISION_MICRO; + } else { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } + } else { // millisecond by default + pCmd->defaultVal[TIME_PRECISION_INDEX] = TSDB_TIME_PRECISION_MILLI; + } + break; + } + + case CREATE_USER: { + pCmd->command = (pInfo->sqlType == CREATE_USER) ? 
TSDB_SQL_CREATE_USER : TSDB_SQL_CREATE_ACCT; + assert(pInfo->pDCLInfo->nTokens >= 2); + + char msg[] = "name or password too long"; + char msg1[] = "password can not be empty"; + char msg2[] = "invalid user/account name"; + char msg3[] = "password needs single quote marks enclosed"; + char msg4[] = "invalid state option, available options[no, r, w, all]"; + + if (pInfo->pDCLInfo->a[1].type != TK_STRING) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + strdequote(pInfo->pDCLInfo->a[1].z); + strtrim(pInfo->pDCLInfo->a[1].z); // trim space before and after passwords + pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); + + if (pInfo->pDCLInfo->a[1].n <= 0) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + strncpy(pCmd->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name + strncpy(pCmd->payload, pInfo->pDCLInfo->a[1].z, pInfo->pDCLInfo->a[1].n); // passwd + break; + } + case ALTER_ACCT: { + pCmd->command = TSDB_SQL_ALTER_ACCT; + int32_t num = pInfo->pDCLInfo->nTokens; + assert(num >= 1 && num <= 2); + + char msg[] = "password too long"; + char msg1[] = "password can not be empty"; + char msg2[] = "invalid user/account name"; + char msg3[] = "password needs single quote marks enclosed"; + char msg4[] = "invalid state option, available options[no, r, w, all]"; + + if (num == 2) { + if (pInfo->pDCLInfo->a[1].type != TK_STRING) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + strdequote(pInfo->pDCLInfo->a[1].z); + strtrim(pInfo->pDCLInfo->a[1].z); // trim space before and after passwords + pInfo->pDCLInfo->a[1].n = strlen(pInfo->pDCLInfo->a[1].z); + + if (pInfo->pDCLInfo->a[1].n <= 0) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + if (pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + strncpy(pCmd->payload, pInfo->pDCLInfo->a[1].z, pInfo->pDCLInfo->a[1].n); // passwd + } + + if (pInfo->pDCLInfo->a[0].n > TSDB_USER_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (tscValidateName(&pInfo->pDCLInfo->a[0]) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + strncpy(pCmd->name, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); // name + + SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt; + pCmd->defaultVal[0] = pAcctOpt->users; + pCmd->defaultVal[1] = pAcctOpt->dbs; + pCmd->defaultVal[2] = pAcctOpt->tseries; + pCmd->defaultVal[3] = pAcctOpt->streams; + pCmd->defaultVal[4] = pAcctOpt->pps; + pCmd->defaultVal[5] = pAcctOpt->storage; + pCmd->defaultVal[6] = pAcctOpt->qtime; + pCmd->defaultVal[7] = pAcctOpt->conns; + + if (pAcctOpt->stat.n == 0) { + pCmd->defaultVal[8] = -1; + } else { + strdequote(pAcctOpt->stat.z); + pAcctOpt->stat.n = strlen(pAcctOpt->stat.z); + + if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) { + pCmd->defaultVal[8] = TSDB_VN_READ_ACCCESS; + } else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) { + pCmd->defaultVal[8] = TSDB_VN_WRITE_ACCCESS; + } else if (strncmp(pAcctOpt->stat.z, "all", 3) == 0 && pAcctOpt->stat.n == 3) { + 
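+        // the "all" option maps to full vnode access (read and write combined)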
pCmd->defaultVal[8] = TSDB_VN_ALL_ACCCESS; + } else if (strncmp(pAcctOpt->stat.z, "no", 2) == 0 && pAcctOpt->stat.n == 2) { + pCmd->defaultVal[8] = 0; + } else { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } + } + break; + } + case DESCRIBE_TABLE: { + pCmd->command = TSDB_SQL_DESCRIBE_TABLE; + + SSQLToken* pToken = &pInfo->pDCLInfo->a[0]; + char msg[] = "table name is too long"; + + if (pToken->n > TSDB_METER_NAME_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (setMeterID(pSql, pToken) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t ret = tscGetMeterMeta(pSql, pSql->cmd.name); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + break; + } + case ALTER_DNODE: + case ALTER_USER_PASSWD: + case ALTER_USER_PRIVILEGES: { + pCmd->command = (pInfo->sqlType == ALTER_DNODE) ? TSDB_SQL_CFG_PNODE : TSDB_SQL_ALTER_USER; + + tDCLSQL* pDCL = pInfo->pDCLInfo; + + char msg[] = "parameters too long"; + char msg1[] = "invalid ip address"; + char msg2[] = "invalid configure options or values"; + char msg3[] = "password can not be empty"; + + if (pInfo->sqlType != ALTER_DNODE) { + strdequote(pDCL->a[1].z); + strtrim(pDCL->a[1].z); + pDCL->a[1].n = strlen(pDCL->a[1].z); + } + + if (pDCL->a[1].n <= 0) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + if (pDCL->a[0].n > TSDB_METER_NAME_LEN || pDCL->a[1].n > TSDB_PASSWORD_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (pCmd->command == TSDB_SQL_CFG_PNODE) { + char ip[128] = {0}; + strncpy(ip, pDCL->a[0].z, pDCL->a[0].n); + + /* validate the ip address */ + if (!validateIpAddress(ip)) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + strcpy(pCmd->name, ip); + + /* validate the parameter names and options */ + if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); + + if (pDCL->nTokens == 3) { + pCmd->payload[pDCL->a[1].n] = ' '; // add sep + strncpy(&pCmd->payload[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n); + } + } else { + char msg[] = "invalid user rights"; + char msg1[] = "password can not be empty or larger than 24 characters"; + + strncpy(pCmd->name, pDCL->a[0].z, pDCL->a[0].n); + + if (pInfo->sqlType == ALTER_USER_PASSWD) { + /* update the password for user */ + pCmd->order.order |= TSDB_ALTER_USER_PASSWD; + + strdequote(pDCL->a[1].z); + pDCL->a[1].n = strlen(pDCL->a[1].z); + + if (pDCL->a[1].n <= 0 || pInfo->pDCLInfo->a[1].n > TSDB_PASSWORD_LEN) { + /* password cannot be empty string */ + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + strncpy(pCmd->payload, pDCL->a[1].z, pDCL->a[1].n); + } else if (pInfo->sqlType == ALTER_USER_PRIVILEGES) { + pCmd->order.order |= TSDB_ALTER_USER_PRIVILEGES; + + if (strncasecmp(pDCL->a[1].z, "super", 5) == 0 && pDCL->a[1].n == 5) { + pCmd->count = 1; + } else if (strncasecmp(pDCL->a[1].z, "read", 4) == 0 && pDCL->a[1].n == 4) { + pCmd->count = 2; + } else if (strncasecmp(pDCL->a[1].z, "write", 5) == 0 && pDCL->a[1].n == 5) { + pCmd->count = 3; + } else { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + } else { + return TSDB_CODE_INVALID_SQL; + } + } + break; + } + case ALTER_LOCAL: { + pCmd->command = TSDB_SQL_CFG_LOCAL; + char msg[] = "parameter too long"; + if (pInfo->pDCLInfo->a[0].n > 
TSDB_METER_ID_LEN) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + strncpy(pCmd->payload, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n); + break; + } + case TSQL_CREATE_NORMAL_METER: + case TSQL_CREATE_NORMAL_METRIC: { + char msg[] = "table name too long"; + char msg1[] = "invalid table name"; + + tFieldList* pFieldList = pInfo->pCreateTableInfo->colInfo.pColumns; + tFieldList* pTagList = pInfo->pCreateTableInfo->colInfo.pTagColumns; + assert(pFieldList != NULL); + + pCmd->command = TSDB_SQL_CREATE_TABLE; + pCmd->existsCheck = pInfo->pCreateTableInfo->existCheck; + + // if sql specifies db, use it, otherwise use default db + SSQLToken* pzTableName = &(pInfo->pCreateTableInfo->name); + + if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + if (setMeterID(pSql, pzTableName) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (!validateTableColumnInfo(pFieldList, pCmd) || + (pTagList != NULL && !validateTagParams(pTagList, pFieldList, pCmd))) { + return TSDB_CODE_INVALID_SQL; + } + + int32_t col = 0; + for (; col < pFieldList->nField; ++col) { + tscFieldInfoSetValFromField(&pCmd->fieldsInfo, col, &pFieldList->p[col]); + } + pCmd->numOfCols = (int16_t)pFieldList->nField; + + if (pTagList != NULL) { // create metric[optional] + for (int32_t i = 0; i < pTagList->nField; ++i) { + tscFieldInfoSetValFromField(&pCmd->fieldsInfo, col++, &pTagList->p[i]); + } + pCmd->count = pTagList->nField; + } + + break; + } + case TSQL_CREATE_METER_FROM_METRIC: { + pCmd->command = TSDB_SQL_CREATE_TABLE; + pCmd->existsCheck = pInfo->pCreateTableInfo->existCheck; + + char msg[] = "invalid table name"; + char msg1[] = "illegal value or data overflow"; + char msg2[] = "illegal number of tags"; + + // table name + // metric name, create table by using dst + SSQLToken* pToken = &(pInfo->pCreateTableInfo->usingInfo.metricName); + int32_t ret = setMeterID(pSql, pToken); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + // get meter meta from mnode + STagData* pTag = (STagData*)pCmd->payload; + strncpy(pTag->name, pCmd->name, TSDB_METER_ID_LEN); + + tVariantList* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; + + int32_t code = tscGetMeterMeta(pSql, pTag->name); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pSql->cmd.pMeterMeta->numOfTags != pList->nExpr) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + /* too long tag values will be truncated automatically */ + SSchema* pTagSchema = tsGetTagSchema(pCmd->pMeterMeta); + + char* tagVal = pTag->data; + for (int32_t i = 0; i < pList->nExpr; ++i) { + int32_t ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type); + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + tagVal += pTagSchema[i].bytes; + } + + if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + ret = setMeterID(pSql, &pInfo->pCreateTableInfo->name); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + pCmd->numOfCols = 0; + pCmd->count = 0; + break; + } + case TSQL_CREATE_STREAM: { + pCmd->command = TSDB_SQL_CREATE_TABLE; + char msg[] = "table name too long"; + char msg1[] = "invalid table name"; + + // if sql specifies db, use it, otherwise use default db + SSQLToken* pzTableName = 
&(pInfo->pCreateTableInfo->name); + SQuerySQL* pQuerySql = pInfo->pCreateTableInfo->pSelect; + + if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + SSQLToken* pSrcMeterName = &pInfo->pCreateTableInfo->pSelect->from; + if (tscValidateName(pSrcMeterName) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + if (setMeterID(pSql, pSrcMeterName) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t code = tscGetMeterMeta(pSql, pCmd->name); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + bool isMetric = UTIL_METER_IS_METRIC(pCmd); + if (buildSelectionClause(pCmd, pQuerySql->pSelection, isMetric) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + if (pQuerySql->pWhere != NULL) { // query condition in stream computing + if (buildQueryCond(pSql, pQuerySql->pWhere) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + } + + // set interval value + if (setIntervalClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } else { + if ((pCmd->nAggTimeInterval > 0) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd) != TSDB_CODE_SUCCESS)) { + return TSDB_CODE_INVALID_SQL; + } + } + + if (setSlidingClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // set the created table[stream] name + if (setMeterID(pSql, pzTableName) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + // copy sql length + tscAllocPayloadWithSize(pCmd, pQuerySql->selectToken.n + 8); + + strncpy(pCmd->payload, pQuerySql->selectToken.z, pQuerySql->selectToken.n); + if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) { + char msg4[] = "sql too long"; // todo ADD support + setErrMsg(pCmd, msg, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } + + if (tsRewriteFieldNameIfNecessary(pCmd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + pCmd->numOfCols = pCmd->fieldsInfo.numOfOutputCols; + + if (validateSqlFunctionInStreamSql(pCmd) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + /* + * check if fill operation is available, the fill operation is parsed and executed during query execution, not here. 
+ */ + if (pQuerySql->fillType != NULL) { + if (pCmd->nAggTimeInterval == 0) { + char msg1[] = "fill only available for interval query"; + setErrMsg(pCmd, msg1, tListLen(msg1)); + + return TSDB_CODE_INVALID_SQL; + } + + tVariantListItem* pItem = &pQuerySql->fillType->a[0]; + if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) { + if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) || + (strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) { + char msg2[] = "fill option not supported in stream computing"; + setErrMsg(pCmd, msg2, tListLen(msg2)); + + return TSDB_CODE_INVALID_SQL; + } + } + } + + break; + } + + case TSQL_QUERY_METER: { + SQuerySQL* pQuerySql = pInfo->pQueryInfo; + assert(pQuerySql != NULL); + + // too many result columns not support order by in query + if (pQuerySql->pSelection->nExpr > TSDB_MAX_COLUMNS) { + char msg[] = "too many columns in selection clause"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + strdequote(pQuerySql->from.z); + if (setMeterID(pSql, &pQuerySql->from) != TSDB_CODE_SUCCESS) { + char msg[] = "table name too long"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + pSql->cmd.command = TSDB_SQL_SELECT; + + int32_t code = tscGetMeterMeta(pSql, pCmd->name); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + bool isMetric = UTIL_METER_IS_METRIC(pCmd); + if (buildSelectionClause(pCmd, pQuerySql->pSelection, isMetric) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // set interval value + if (setIntervalClause(pCmd, pQuerySql) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } else { + if ((pCmd->nAggTimeInterval > 0) && (validateFunctionsInIntervalOrGroupbyQuery(pCmd) != TSDB_CODE_SUCCESS)) { + return TSDB_CODE_INVALID_SQL; + } + } + + // set sliding value + SSQLToken* pSliding = &pQuerySql->sliding; + if (pSliding->n != 0) { + if (!tscEmbedded) { + char msg[] = "not support sliding in query"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + code = getTimestampInUsFromStr(pSliding->z, pSliding->n, &pCmd->nSlidingTime); + if (pCmd->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { + pCmd->nSlidingTime /= 1000; + } + + char msg3[] = "sliding value too small"; + char msg4[] = "sliding value no larger than the interval value"; + + if (pCmd->nSlidingTime < tsMinSlidingTime) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } + } + + if (setGroupByClause(pCmd, pQuerySql->pGroupby) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // set order by info + if (setOrderByClause(pCmd, pQuerySql, tsGetSchema(pCmd->pMeterMeta), pCmd->pMeterMeta->numOfColumns) != + TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // set where info + if (pQuerySql->pWhere != NULL) { + if (buildQueryCond(pSql, pQuerySql->pWhere) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + if (pCmd->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { + pCmd->stime = pCmd->stime / 1000; + pCmd->etime = pCmd->etime / 1000; + } + } else { // set the time range + pCmd->stime = 0; + pCmd->etime = INT64_MAX; + } + + // no result due to invalid query time range + if (pCmd->stime > pCmd->etime) { + pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; + } + + if (!hasTimestampForPointInterpQuery(pCmd)) { + char msg[] = "point 
interpolation query needs timestamp"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (pQuerySql->fillType != NULL) { + char msg1[] = "fill only available for interval query"; + char msg2[] = "start(end) time of query range required or time range too large"; + + if (pCmd->nAggTimeInterval == 0 && (!tscIsPointInterpQuery(pCmd))) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + if (pCmd->nAggTimeInterval > 0) { + int64_t timeRange = labs(pCmd->stime - pCmd->etime); + // number of result is not greater than 10,000,000 + + // TODO define macro + if ((timeRange == 0) || (timeRange / pCmd->nAggTimeInterval) > 10000000) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + } + + int32_t ret = setFillPolicy(pCmd, pQuerySql); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } + + // handle the limit offset value, validate the limit + pCmd->limit = pQuerySql->limit; + + /* temporarily save the original limitation value */ + if ((code = setLimitOffsetValueInfo(pSql, pQuerySql)) != TSDB_CODE_SUCCESS) { + return code; + } + + setColumnOffsetValueInResultset(pCmd); + updateTagColumnIndex(pCmd); + break; + } + case TSQL_INSERT: { + assert(false); + } + case ALTER_TABLE_ADD_COLUMN: + case ALTER_TABLE_DROP_COLUMN: + case ALTER_TABLE_TAGS_ADD: + case ALTER_TABLE_TAGS_DROP: + case ALTER_TABLE_TAGS_CHG: + case ALTER_TABLE_TAGS_SET: { + return setAlterTableInfo(pSql, pInfo); + } + + case KILL_CONNECTION: + case KILL_QUERY: + case KILL_STREAM: { + return setKillInfo(pSql, pInfo); + } + + default: + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + +static bool isTopBottomQuery(SSqlCmd* pCmd) { + if (pCmd->exprsInfo.numOfExprs != 2) { + return false; + } + + int32_t functionId = tscSqlExprGet(pCmd, 1)->sqlFuncId; + return functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TOP_DST || + functionId == TSDB_FUNC_BOTTOM_DST; +} + +int32_t setIntervalClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { + if (pQuerySql->interval.type == 0) { + return TSDB_CODE_SUCCESS; + } + + // interval is not null + SSQLToken* t = &pQuerySql->interval; + if (getTimestampInUsFromStr(t->z, t->n, &pCmd->nAggTimeInterval) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + /* revised the time precision according to the flag */ + if (pCmd->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { + pCmd->nAggTimeInterval = pCmd->nAggTimeInterval / 1000; + } + + /* parser has filter the illegal type, no need to check here */ + pCmd->intervalTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1]; + + // interval cannot be less than 10 milliseconds + if (pCmd->nAggTimeInterval < tsMinIntervalTime) { + char msg[] = "interval cannot be less than 10 ms"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + // for top/bottom + interval query, we do not add additional timestamp column in the front + if (isTopBottomQuery(pCmd)) { + return TSDB_CODE_SUCCESS; + } + + // need to add timestamp column in resultset, if interval is existed + tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS, 0, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE); + + SColumnList ids = {.numOfCols = 1, .ids = {0}}; + int32_t ret = insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].aName); + + return ret; +} + +int32_t setSlidingClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql) { + char msg0[] = "sliding value too small"; + char msg1[] = "sliding value no larger than the 
interval value"; + + SSQLToken* pSliding = &pQuerySql->sliding; + + if (pSliding->n != 0) { + getTimestampInUsFromStr(pSliding->z, pSliding->n, &pCmd->nSlidingTime); + if (pCmd->pMeterMeta->precision == TSDB_TIME_PRECISION_MILLI) { + pCmd->nSlidingTime /= 1000; + } + + if (pCmd->nSlidingTime < tsMinSlidingTime) { + setErrMsg(pCmd, msg0, tListLen(msg0)); + return TSDB_CODE_INVALID_SQL; + } + + if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t setMeterID(SSqlObj *pSql, SSQLToken *pzTableName) { + SSqlCmd *pCmd = &(pSql->cmd); + int32_t ret = TSDB_CODE_SUCCESS; + + //clear array + memset(pCmd->name, 0, tListLen(pCmd->name)); + char msg[] = "name too long"; + + if (hasSpecifyDB(pzTableName)) { + /* + * db has been specified in sql string + * so we ignore current db path + */ + ret = setObjFullName(pCmd->name, getAccountId(pSql), NULL, pzTableName, NULL); + } else { + /* get current DB name first, then set it into path */ + SSQLToken t = {0}; + getCurrentDBName(pSql, &t); + + ret = setObjFullName(pCmd->name, NULL, &t, pzTableName, NULL); + } + + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + } + + return ret; +} + +static bool validateTableColumnInfo(tFieldList* pFieldList, SSqlCmd* pCmd) { + assert(pFieldList != NULL); + + char msg[] = "illegal number of columns"; + char msg1[] = "first column must be timestamp"; + char msg2[] = "row length exceeds max length"; + char msg3[] = "duplicated column names"; + char msg4[] = "invalid data types"; + char msg5[] = "invalid binary/nchar column length"; + char msg6[] = "invalid column name"; + + // number of fields no less than 2 + if (pFieldList->nField <= 1 || pFieldList->nField > TSDB_MAX_COLUMNS) { + setErrMsg(pCmd, msg, tListLen(msg)); + return false; + } + + // first column must be timestamp + if (pFieldList->p[0].type != TSDB_DATA_TYPE_TIMESTAMP) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return false; + } + + int32_t nLen = 0; + for (int32_t i = 0; i < pFieldList->nField; ++i) { + nLen += pFieldList->p[i].bytes; + } + + // max row length must be less than TSDB_MAX_BYTES_PER_ROW + if (nLen > TSDB_MAX_BYTES_PER_ROW) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return false; + } + + // field name must be unique + for (int32_t i = 0; i < pFieldList->nField; ++i) { + TAOS_FIELD* pField = &pFieldList->p[i]; + if (pField->type < TSDB_DATA_TYPE_BOOL || pField->type > TSDB_DATA_TYPE_NCHAR) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return false; + } + + if ((pField->type == TSDB_DATA_TYPE_BINARY && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_BINARY_LEN)) || + (pField->type == TSDB_DATA_TYPE_NCHAR && (pField->bytes <= 0 || pField->bytes > TSDB_MAX_NCHAR_LEN))) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return false; + } + + if (validateColumnName(pField->name) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg6, tListLen(msg6)); + return false; + } + + if (has(pFieldList, i + 1, pFieldList->p[i].name) == true) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return false; + } + } + + return true; +} + +static bool validateTagParams(tFieldList* pTagsList, tFieldList* pFieldList, SSqlCmd* pCmd) { + assert(pTagsList != NULL); + + char msg1[] = "invalid number of tag columns"; + char msg2[] = "tag length too long"; + char msg3[] = "duplicated column names"; + char msg4[] = "timestamp not allowed in tags"; + char msg5[] = "invalid data type in tags"; + char msg6[] = "invalid tag name"; + char msg7[] = "invalid 
binary/nchar tag length"; + + // number of fields at least 1 + if (pTagsList->nField < 1 || pTagsList->nField > TSDB_MAX_TAGS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return false; + } + + int32_t nLen = 0; + for (int32_t i = 0; i < pTagsList->nField; ++i) { + nLen += pTagsList->p[i].bytes; + } + + // max tag row length must be less than TSDB_MAX_TAGS_LEN + if (nLen > TSDB_MAX_TAGS_LEN) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return false; + } + + // field name must be unique + for (int32_t i = 0; i < pTagsList->nField; ++i) { + if (has(pFieldList, 0, pTagsList->p[i].name) == true) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return false; + } + } + + /* timestamp in tag is not allowed */ + for (int32_t i = 0; i < pTagsList->nField; ++i) { + if (pTagsList->p[i].type == TSDB_DATA_TYPE_TIMESTAMP) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return false; + } + + if (pTagsList->p[i].type < TSDB_DATA_TYPE_BOOL || pTagsList->p[i].type > TSDB_DATA_TYPE_NCHAR) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return false; + } + + if ((pTagsList->p[i].type == TSDB_DATA_TYPE_BINARY && pTagsList->p[i].bytes <= 0) || + (pTagsList->p[i].type == TSDB_DATA_TYPE_NCHAR && pTagsList->p[i].bytes <= 0)) { + setErrMsg(pCmd, msg7, tListLen(msg7)); + return false; + } + + if (validateColumnName(pTagsList->p[i].name) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg6, tListLen(msg6)); + return false; + } + + if (has(pTagsList, i + 1, pTagsList->p[i].name) == true) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return false; + } + } + + return true; +} + +/* + * tags name /column name is truncated in sql.y + */ +bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) { + char msg1[] = "timestamp not allowed in tags"; + char msg2[] = "duplicated column names"; + char msg3[] = "tag length too long"; + char msg4[] = "invalid tag name"; + char msg5[] = "invalid binary/nchar tag length"; + char msg6[] = "invalid data type in tags"; + + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + + // no more than 6 tags + if (pMeterMeta->numOfTags == TSDB_MAX_TAGS) { + char msg[128] = {0}; + sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS); + + setErrMsg(pCmd, msg, strlen(msg)); + return false; + } + + // no timestamp allowable + if (pTagField->type == TSDB_DATA_TYPE_TIMESTAMP) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return false; + } + + if (pTagField->type < TSDB_DATA_TYPE_BOOL && pTagField->type > TSDB_DATA_TYPE_NCHAR) { + setErrMsg(pCmd, msg6, tListLen(msg6)); + return false; + } + + SSchema* pTagSchema = tsGetTagSchema(pCmd->pMeterMeta); + int32_t nLen = 0; + + for (int32_t i = 0; i < pMeterMeta->numOfTags; ++i) { + nLen += pTagSchema[i].bytes; + } + + // length less than TSDB_MAX_TASG_LEN + if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return false; + } + + // tags name can not be a keyword + if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return false; + } + + // binary(val), val can not be equalled to or less than 0 + if ((pTagField->type == TSDB_DATA_TYPE_BINARY || pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return false; + } + + // field name must be unique + SSchema* pSchema = tsGetSchema(pMeterMeta); + + for (int32_t i = 0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { + if (strncasecmp(pTagField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return false; + } + } + + return 
true; +} + +bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) { + char msg1[] = "too many columns"; + char msg2[] = "duplicated column names"; + char msg3[] = "column length too long"; + char msg4[] = "invalid data types"; + char msg5[] = "invalid column name"; + char msg6[] = "invalid column length"; + + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + + // no more max columns + if (pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS || + pMeterMeta->numOfTags + pMeterMeta->numOfColumns >= TSDB_MAX_COLUMNS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return false; + } + + if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_NCHAR) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return false; + } + + if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return false; + } + + SSchema* pSchema = tsGetSchema(pMeterMeta); + int32_t nLen = 0; + + for (int32_t i = 0; i < pMeterMeta->numOfColumns; ++i) { + nLen += pSchema[i].bytes; + } + + if (pColField->bytes <= 0) { + setErrMsg(pCmd, msg6, tListLen(msg6)); + return false; + } + + // length less than TSDB_MAX_BYTES_PER_ROW + if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return false; + } + + // field name must be unique + for (int32_t i = 0; i < pMeterMeta->numOfTags + pMeterMeta->numOfColumns; ++i) { + if (strncasecmp(pColField->name, pSchema[i].name, TSDB_COL_NAME_LEN) == 0) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return false; + } + } + + return true; +} + +/* is contained in pFieldList or not */ +static bool has(tFieldList* pFieldList, int32_t startIdx, char* name) { + for (int32_t j = startIdx; j < pFieldList->nField; ++j) { + if (strncasecmp(name, pFieldList->p[j].name, TSDB_COL_NAME_LEN) == 0) return true; + } + + return false; +} + +static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; } + +static void getCurrentDBName(SSqlObj* pSql, SSQLToken* pDBToken) { + pDBToken->z = pSql->pTscObj->db; + pDBToken->n = strlen(pSql->pTscObj->db); +} + +/* length limitation, strstr cannot be applied */ +static bool hasSpecifyDB(SSQLToken* pTableName) { + for (int32_t i = 0; i < pTableName->n; ++i) { + if (pTableName->z[i] == TS_PATH_DELIMITER[0]) { + return true; + } + } + + return false; +} + +static int32_t setObjFullName(char* fullName, char* account, SSQLToken* pDB, SSQLToken* tableName, int32_t* xlen) { + int32_t totalLen = 0; + + if (account != NULL) { + int32_t len = strlen(account); + strcpy(fullName, account); + fullName[len] = TS_PATH_DELIMITER[0]; + totalLen += (len + 1); + } + + /* db name is not specified, the tableName dose not include db name */ + if (pDB != NULL) { + if (pDB->n > TSDB_DB_NAME_LEN) { + return TSDB_CODE_INVALID_SQL; + } + + memcpy(&fullName[totalLen], pDB->z, pDB->n); + totalLen += pDB->n; + } + + if (tableName != NULL) { + if (pDB != NULL) { + fullName[totalLen] = TS_PATH_DELIMITER[0]; + totalLen += 1; + + /* here we only check the table name length limitation */ + if (tableName->n > TSDB_METER_NAME_LEN) { + return TSDB_CODE_INVALID_SQL; + } + } else { // pDB == NULL, the db prefix name is specified in tableName + /* the length limitation includes tablename + dbname + sep */ + if (tableName->n > TSDB_METER_NAME_LEN + TSDB_DB_NAME_LEN + tListLen(TS_PATH_DELIMITER)) { + return TSDB_CODE_INVALID_SQL; + } + } + + memcpy(&fullName[totalLen], tableName->z, tableName->n); + totalLen += tableName->n; + } + + fullName[totalLen] = 0; + + if (xlen != NULL) { + *xlen = totalLen; + } 
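+ // the assembled full name has the form acct.db.table; it is rejected below once the
+ // total length exceeds TSDB_METER_ID_LEN, since callers keep it in fixed-size buffers such as pCmd->name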
+ return (totalLen <= TSDB_METER_ID_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_INVALID_SQL; +} + +static void extractColumnNameFromString(tSQLExprItem* pItem, char* tmpBuf) { + if (pItem->pNode->nSQLOptr == TK_STRING) { + strdequote(pItem->pNode->val.pz); + strcpy(tmpBuf, pItem->pNode->val.pz); + + tVariantDestroy(&pItem->pNode->val); + pItem->pNode->nSQLOptr = TK_ID; + + SSQLToken* pIdToken = &pItem->pNode->colInfo; + pIdToken->type = TK_ID; + pIdToken->z = tmpBuf; + pIdToken->n = strlen(pIdToken->z); + } +} + +int32_t buildSelectionClause(SSqlCmd* pCmd, tSQLExprList* pSelection, bool isMetric) { + assert(pSelection != NULL && pCmd != NULL); + + char msg1[] = "invalid column name/illegal column type in arithmetic expression"; + char msg2[] = "functions can not be mixed up"; + char msg3[] = "not support query expression"; + char msg4[] = "function not support in stable query"; + + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + + for (int32_t i = 0; i < pSelection->nExpr; ++i) { + int32_t outputIndex = pCmd->fieldsInfo.numOfOutputCols; + + tSQLExprItem* pItem = &pSelection->a[i]; + if (pItem->pNode->nSQLOptr == TK_ALL || pItem->pNode->nSQLOptr == TK_ID || + pItem->pNode->nSQLOptr == TK_STRING) { // project on all fields + + if (pItem->pNode->nSQLOptr == TK_ID && (pItem->pNode->colInfo.z == NULL && pItem->pNode->colInfo.n == 0)) { + /* it is actually a function, but the function name is invalid */ + return TSDB_CODE_INVALID_SQL; + } + + /* if the name of column is quoted, remove it and set the right + * information for later process */ + char tmpName[TSDB_METER_NAME_LEN + 1] = {0}; + extractColumnNameFromString(pItem, tmpName); + + /* select * / select field_name1, field_name2 from table_name */ + int32_t ret = addProjectionExprAndResultField(pCmd, pSchema, pItem, isMetric); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_LAST_ROW) { + // sql function optr + /* sql function in selection clause, append sql function info in pSqlCmd structure sequentially */ + if (addExprAndResultField(pCmd, outputIndex, pItem) == -1) { + return TSDB_CODE_INVALID_SQL; + } + + } else if (pItem->pNode->nSQLOptr >= TK_PLUS && pItem->pNode->nSQLOptr <= TK_REM) { + /* arithmetic function in select*/ + int32_t ret = validateArithmeticSQLExpr(pItem->pNode, pSchema, pCmd->pMeterMeta->numOfColumns); + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + SColumnIdList ids = {0}; + ids.pSchema = pSchema; + ids.numOfCols = pCmd->pMeterMeta->numOfColumns; + + char arithmeticExprStr[1024] = {0}; + char* p = arithmeticExprStr; + + if (buildArithmeticExprString(pItem->pNode, &p, &ids) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // expr string is set as the parameter of function + SSqlExpr* pExpr = tscSqlExprInsert(pCmd, outputIndex, TSDB_FUNC_ARITHM, 0, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + addExprParams(pExpr, arithmeticExprStr, TSDB_DATA_TYPE_BINARY, strlen(arithmeticExprStr)); + + /* todo alias name should use the original sql string */ + if (pItem->aliasName != NULL) { + strncpy(pExpr->aliasName, pItem->aliasName, TSDB_COL_NAME_LEN); + } else { + strncpy(pExpr->aliasName, arithmeticExprStr, TSDB_COL_NAME_LEN); + } + + SColumnList idx = {.numOfCols = ids.numOfRecordedCols, .ids = {0}}; + memcpy(idx.ids, ids.ids, ids.numOfRecordedCols * sizeof(ids.ids[0])); + + insertResultField(pCmd, i, &idx, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName); + } else { 
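+ // none of the supported select-item forms matched: not a plain column or * projection,
+ // not an aggregate/selector function, and not an arithmetic expression over columns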
+ /* + * not support such expression + * e.g., select 12+5 from table_name + */ + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + if (pCmd->fieldsInfo.numOfOutputCols > TSDB_MAX_COLUMNS) { + return TSDB_CODE_INVALID_SQL; + } + } + + if (!functionCompatibleCheck(pCmd)) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + if (isMetric) { + pCmd->metricQuery = 1; + + if (onlyQueryMetricTags(pCmd)) { // local handle the metric tag query + pCmd->command = TSDB_SQL_RETRIEVE_TAGS; + pCmd->count = pCmd->pMeterMeta->numOfColumns; // the number of meter schema, tricky. + } + + /* + * transfer sql functions that need secondary merge into another format + * in dealing with metric queries such as: count/first/last + */ + tscTansformSQLFunctionForMetricQuery(pCmd); + + if (hasUnsupportFunctionsForMetricQuery(pCmd)) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t insertResultField(SSqlCmd* pCmd, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, + char* fieldName) { + for (int32_t i = 0; i < pIdList->numOfCols; ++i) { + tscColumnInfoInsert(pCmd, pIdList->ids[i]); + } + + tscFieldInfoSetValue(&pCmd->fieldsInfo, outputIndex, type, fieldName, bytes); + return TSDB_CODE_SUCCESS; +} + +void setProjExprForMetricQuery(SSqlCmd* pCmd, int32_t outputIndex, int32_t colIdx) { + pCmd->metricQuery = 1; + SSchema* pSchema = tsGetSchemaColIdx(pCmd->pMeterMeta, colIdx); + + int16_t functionId = (int16_t)((colIdx >= pCmd->pMeterMeta->numOfColumns) ? TSDB_FUNC_TAGPRJ : // tagPrj function + TSDB_FUNC_PRJ); // colprj function + + int32_t numOfCols = pCmd->pMeterMeta->numOfColumns; + + bool isTag = false; + if (colIdx >= numOfCols) { + colIdx -= numOfCols; + addRequiredTagColumn(pCmd, colIdx); + isTag = true; + } + + SSqlExpr* pExpr = tscSqlExprInsert(pCmd, outputIndex, functionId, colIdx, pSchema->type, pSchema->bytes); + pExpr->colInfo.isTag = isTag; +} + +void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex) { + if (pCmd->numOfReqTags == 0 || pCmd->tagColumnIndex[pCmd->numOfReqTags - 1] < tagColIndex) { + pCmd->tagColumnIndex[pCmd->numOfReqTags++] = tagColIndex; + } else { // find the appropriate position + for (int32_t i = 0; i < pCmd->numOfReqTags; ++i) { + if (tagColIndex > pCmd->tagColumnIndex[i]) { + continue; + } else if (tagColIndex == pCmd->tagColumnIndex[i]) { + break; + } else { + memmove(&pCmd->tagColumnIndex[i + 1], &pCmd->tagColumnIndex[i], + sizeof(pCmd->tagColumnIndex[0]) * (pCmd->numOfReqTags - i)); + pCmd->tagColumnIndex[i] = tagColIndex; + + pCmd->numOfReqTags++; + break; + } + } + } + + // plus one means tbname + assert(tagColIndex >= -1 && tagColIndex < TSDB_MAX_TAGS && pCmd->numOfReqTags <= TSDB_MAX_TAGS + 1); +} + +int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SSchema* pSchema, tSQLExprItem* pItem, bool isMetric) { + int32_t startPos = pCmd->fieldsInfo.numOfOutputCols; + + if (pItem->pNode->nSQLOptr == TK_ALL) { // project on all fields + int32_t numOfTotalColumns = 0; + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + + if (isMetric) { // metric query + numOfTotalColumns = pMeterMeta->numOfColumns + pMeterMeta->numOfTags; + + for (int32_t j = 0; j < numOfTotalColumns; ++j) { + setProjExprForMetricQuery(pCmd, startPos + j, j); + + SColumnList ids = {.numOfCols = 1, .ids = {j}}; + + // tag columns do not add to source list + if (j >= pMeterMeta->numOfColumns) { + ids.numOfCols = 0; + } + insertResultField(pCmd, startPos + j, &ids, 
pSchema[j].bytes, pSchema[j].type, pSchema[j].name); + } + } else { // meter query + numOfTotalColumns = pMeterMeta->numOfColumns; + for (int32_t j = 0; j < numOfTotalColumns; ++j) { + tscSqlExprInsert(pCmd, j, TSDB_FUNC_PRJ, j, pSchema[j].type, pSchema[j].bytes); + + SColumnList ids = {.numOfCols = 1, .ids = {j}}; + insertResultField(pCmd, startPos + j, &ids, pSchema[j].bytes, pSchema[j].type, pSchema[j].name); + } + } + + } else if (pItem->pNode->nSQLOptr == TK_ID) { // simple column projection query + int32_t numOfAllCols = pCmd->pMeterMeta->numOfColumns + pCmd->pMeterMeta->numOfTags; + int32_t idx = getColumnIndexByName(&pItem->pNode->colInfo, pSchema, numOfAllCols); + if (idx == -1) { + if (strncmp(pItem->pNode->colInfo.z, TSQL_TBNAME_L, 6) == 0 && pItem->pNode->colInfo.n == 6) { + SSqlExpr* pExpr = + tscSqlExprInsert(pCmd, startPos, TSDB_FUNC_TAGPRJ, -1, TSDB_DATA_TYPE_BINARY, TSDB_METER_NAME_LEN); + + SColumnList ids = {.numOfCols = 1, .ids = {idx}}; + insertResultField(pCmd, startPos, &ids, TSDB_METER_NAME_LEN, TSDB_DATA_TYPE_BINARY, TSQL_TBNAME_L); + + pCmd->metricQuery = 1; + addRequiredTagColumn(pCmd, -1); + pExpr->colInfo.isTag = true; + } else { + char msg[] = "invalid column name"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + } else { + if (isMetric) { + setProjExprForMetricQuery(pCmd, startPos, idx); + } else { + tscSqlExprInsert(pCmd, startPos, TSDB_FUNC_PRJ, idx, pSchema[idx].type, pSchema[idx].bytes); + } + + char* colName = (pItem->aliasName == NULL) ? pSchema[idx].name : pItem->aliasName; + SColumnList ids = {.numOfCols = 1, .ids = {idx}}; + + if (idx >= pCmd->pMeterMeta->numOfColumns || idx == -1) { + ids.numOfCols = 0; + } + + insertResultField(pCmd, startPos, &ids, pSchema[idx].bytes, pSchema[idx].type, colName); + } + } else { + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SSchema* pSchema, int32_t functionID, char* aliasName, + int32_t resColIdx, int32_t idx) { + int16_t type = 0; + int16_t bytes = 0; + + char columnName[TSDB_COL_NAME_LEN + 1] = {0}; + char msg1[] = "not support column types"; + + if (functionID == TSDB_FUNC_SPREAD) { + if (pSchema[idx].type == TSDB_DATA_TYPE_BINARY || pSchema[idx].type == TSDB_DATA_TYPE_NCHAR || + pSchema[idx].type == TSDB_DATA_TYPE_BOOL) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return -1; + } else { + type = TSDB_DATA_TYPE_DOUBLE; + bytes = tDataTypeDesc[type].nSize; + } + } else { + type = pSchema[idx].type; + bytes = pSchema[idx].bytes; + } + + if (aliasName != NULL) { + strcpy(columnName, aliasName); + } else { + getRevisedName(columnName, functionID, TSDB_COL_NAME_LEN, pSchema[idx].name); + } + + tscSqlExprInsert(pCmd, resColIdx, functionID, idx, type, bytes); + + /* for point interpolation/last_row query, we need the timestamp column to be + * loaded */ + if (functionID == TSDB_FUNC_INTERP || functionID == TSDB_FUNC_LAST_ROW) { + tscColumnInfoInsert(pCmd, PRIMARYKEY_TIMESTAMP_COL_INDEX); + } + + SColumnList ids = {.numOfCols = 1, .ids = {idx}}; + insertResultField(pCmd, resColIdx, &ids, bytes, type, columnName); + + return TSDB_CODE_SUCCESS; +} + +int32_t addExprAndResultField(SSqlCmd* pCmd, int32_t colIdx, tSQLExprItem* pItem) { + int32_t optr = pItem->pNode->nSQLOptr; + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + int32_t numOfAddedColumn = 1; + + char msg[] = "invalid parameters"; + char msg1[] = "not support column types"; + char msg3[] = "illegal column name"; + char msg5[] = "parameter is out of 
range [0, 100]"; + + switch (optr) { + case TK_COUNT: { + if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) { + /* more than one parameter for count() function */ + setErrMsg(pCmd, msg, tListLen(msg)); + return -1; + } + + int16_t functionID = 0; + if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + return -1; + } + + int32_t columnId = 0; + + if (pItem->pNode->pParam != NULL) { + SSQLToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo; + if (pToken->z == NULL || pToken->n == 0) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return -1; + } + + /* count the number of meters created according to the metric */ + if (strncmp(pToken->z, "tbname", 6) == 0 && pToken->n == 6) { + tscSqlExprInsert(pCmd, colIdx, functionID, -1, TSDB_DATA_TYPE_BIGINT, + tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize); + } else { + columnId = getColumnIndexByName(pToken, pSchema, pCmd->pMeterMeta->numOfColumns); + if (columnId < 0) { // invalid column name + setErrMsg(pCmd, msg3, tListLen(msg3)); + return -1; + } + + tscSqlExprInsert(pCmd, colIdx, functionID, columnId, TSDB_DATA_TYPE_BIGINT, + tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize); + } + } else { + /* count(*) is equalled to count(primary_timestamp_key) */ + tscSqlExprInsert(pCmd, colIdx, functionID, PRIMARYKEY_TIMESTAMP_COL_INDEX, TSDB_DATA_TYPE_BIGINT, + tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize); + } + + char columnName[TSDB_COL_NAME_LEN] = {0}; + getColumnName(pItem, columnName, TSDB_COL_NAME_LEN); + + // count always use the primary timestamp key column, which is 0. + SColumnList ids = {.numOfCols = 1, .ids = {columnId}}; + insertResultField(pCmd, colIdx, &ids, sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, columnName); + return numOfAddedColumn; + } + case TK_SUM: + case TK_AVG: + case TK_WAVG: + case TK_MIN: + case TK_MAX: + case TK_DIFF: + case TK_STDDEV: + case TK_LEASTSQUARES: { + // 1. valid the number of parameters + if (pItem->pNode->pParam == NULL || (optr != TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 1) || + (optr == TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 3)) { + /* no parameters or more than one parameter for function */ + setErrMsg(pCmd, msg, tListLen(msg)); + return -1; + } + + tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); + if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { + setErrMsg(pCmd, msg, tListLen(msg)); + return -1; + } + + int32_t idx = getColumnIndexByName(&pParamElem->pNode->colInfo, pSchema, pCmd->pMeterMeta->numOfColumns); + if (idx < 0) { // invalid column name + setErrMsg(pCmd, msg3, tListLen(msg3)); + return -1; + } + + // 2. 
check if sql function can be applied on this column data type + int16_t colType = pSchema[idx].type; + if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return -1; + } + + char columnName[TSDB_COL_NAME_LEN] = {0}; + getColumnName(pItem, columnName, TSDB_COL_NAME_LEN); + + int16_t resultType = 0; + int16_t resultSize = 0; + + int16_t functionID = 0; + if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) { + return -1; + } + + getResultInfo(pSchema[idx].type, pSchema[idx].bytes, functionID, 0, &resultType, &resultSize); + + if (optr == TK_DIFF) { + // set the first column ts for diff query + colIdx += 1; + tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS_DUMMY, 0, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE); + + SColumnList ids = {.numOfCols = 1, .ids = {0}}; + insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName); + } + + SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionID, idx, resultType, resultSize); + + if (optr == TK_LEASTSQUARES) { + /* set the leastsquares parameters */ + char val[8] = {0}; + if (tVariantDump(&pParamElem[1].pNode->val, val, TSDB_DATA_TYPE_DOUBLE) < 0) { + return TSDB_CODE_INVALID_SQL; + } + + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + + memset(val, 0, tListLen(val)); + if (tVariantDump(&pParamElem[2].pNode->val, val, TSDB_DATA_TYPE_DOUBLE) < 0) { + return TSDB_CODE_INVALID_SQL; + } + + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + } + + SColumnList ids = {.numOfCols = 1, .ids = {idx}}; + insertResultField(pCmd, colIdx, &ids, pExpr->resBytes, pExpr->resType, columnName); + + return numOfAddedColumn; + } + case TK_FIRST: + case TK_LAST: + case TK_SPREAD: + case TK_LAST_ROW: + case TK_INTERP: { + bool requireAllFields = (pItem->pNode->pParam == NULL); + + int16_t functionID = 0; + changeFunctionID(optr, &functionID); + + if (!requireAllFields) { + if (pItem->pNode->pParam->nExpr < 1) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return -1; + } + + /* in first/last function, multiple columns can be add to resultset */ + + for (int32_t i = 0; i < pItem->pNode->pParam->nExpr; ++i) { + tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[i]); + if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return -1; + } + + int32_t idx = getColumnIndexByName(&pParamElem->pNode->colInfo, pSchema, pCmd->pMeterMeta->numOfColumns); + if (idx == -1) { + return -1; + } + + if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx + i, idx) != 0) { + return -1; + } + } + + return pItem->pNode->pParam->nExpr; + } else { + for (int32_t i = 0; i < pCmd->pMeterMeta->numOfColumns; ++i) { + if (setExprInfoForFunctions(pCmd, pSchema, functionID, pItem->aliasName, colIdx + i, i) != 0) { + return -1; + } + } + + return pCmd->pMeterMeta->numOfColumns; + } + } + case TK_TOP: + case TK_BOTTOM: + case TK_PERCENTILE: + case TK_APERCENTILE: { + // 1. 
valid the number of parameters + if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 2) { + /* no parameters or more than one parameter for function */ + setErrMsg(pCmd, msg, tListLen(msg)); + return -1; + } + + tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); + if (pParamElem->pNode->nSQLOptr != TK_ID) { + setErrMsg(pCmd, msg, tListLen(msg)); + } + + char columnName[TSDB_COL_NAME_LEN] = {0}; + getColumnName(pItem, columnName, TSDB_COL_NAME_LEN); + + int32_t idx = getColumnIndexByName(&pParamElem->pNode->colInfo, pSchema, pCmd->pMeterMeta->numOfColumns); + if (idx == -1) { + return -1; + } + + // 2. valid the column type + int16_t colType = pSchema[idx].type; + if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return -1; + } + + // 3. valid the parameters + if (pParamElem[1].pNode->nSQLOptr == TK_ID) { + setErrMsg(pCmd, msg, tListLen(msg)); + return -1; + } + + tVariant* pVariant = &pParamElem[1].pNode->val; + + int8_t resultType = pSchema[idx].type; + int16_t resultSize = pSchema[idx].bytes; + + char val[8] = {0}; + if (optr == TK_PERCENTILE || optr == TK_APERCENTILE) { + tVariantDump(pVariant, val, TSDB_DATA_TYPE_DOUBLE); + + double dp = *((double*)val); + if (dp < 0 || dp > 100) { // todo use macro + setErrMsg(pCmd, msg5, tListLen(msg5)); + return -1; + } + + resultSize = sizeof(double); + resultType = TSDB_DATA_TYPE_DOUBLE; + + /* + * sql function transformation + * for dp = 0, it is actually min, + * for dp = 100, it is max, + */ + int16_t functionId = 0; + if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { + return -1; + } + + SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionId, idx, resultType, resultSize); + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); + } else { + tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT); + + int64_t nTop = *((int32_t*)val); + if (nTop <= 0 || nTop > 100) { // todo use macro + return -1; + } + + int16_t functionId = 0; + if (changeFunctionID(optr, &functionId) != TSDB_CODE_SUCCESS) { + return -1; + } + // set the first column ts for top/bottom query + tscSqlExprInsert(pCmd, 0, TSDB_FUNC_TS, 0, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE); + SColumnList ids = {.numOfCols = 1, .ids = {0}}; + insertResultField(pCmd, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS].aName); + + colIdx += 1; // the first column is ts + numOfAddedColumn += 1; + + SSqlExpr* pExpr = tscSqlExprInsert(pCmd, colIdx, functionId, idx, resultType, resultSize); + addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); + } + + SColumnList ids = {.numOfCols = 1, .ids = {idx}}; + insertResultField(pCmd, colIdx, &ids, resultSize, resultType, columnName); + return numOfAddedColumn; + } + default: + return -1; + } +} + +void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength) { + if (pItem->aliasName != NULL) { + strncpy(resultFieldName, pItem->aliasName, nameLength); + } else { + int32_t len = (pItem->pNode->operand.n < nameLength) ? 
pItem->pNode->operand.n : nameLength; + strncpy(resultFieldName, pItem->pNode->operand.z, len); + } +} + +void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName) { + snprintf(resultFieldName, maxLen, "%s(%s)", aAggs[functionId].aName, columnName); +} + +int32_t getColumnIndexByName(SSQLToken* pToken, SSchema* pSchema, int32_t numOfCols) { + if (pToken->z == NULL || pToken->n == 0) { + return -1; + } + + char* r = strnchr(pToken->z, '.', pToken->n); + if (r != NULL) { + r += 1; + + pToken->n -= (r - pToken->z); + pToken->z = r; + } + if (strncasecmp(pToken->z, "_c0", pToken->n) == 0) return 0; + + for (int32_t i = 0; i < numOfCols; ++i) { + if (pToken->n != strlen(pSchema[i].name)) continue; + if (strncasecmp(pSchema[i].name, pToken->z, pToken->n) == 0) return i; + } + + return -1; +} + +int32_t changeFunctionID(int32_t optr, int16_t* functionId) { + switch (optr) { + case TK_COUNT: + *functionId = TSDB_FUNC_COUNT; + break; + case TK_SUM: + *functionId = TSDB_FUNC_SUM; + break; + case TK_AVG: + *functionId = TSDB_FUNC_AVG; + break; + case TK_MIN: + *functionId = TSDB_FUNC_MIN; + break; + case TK_MAX: + *functionId = TSDB_FUNC_MAX; + break; + case TK_STDDEV: + *functionId = TSDB_FUNC_STDDEV; + break; + case TK_PERCENTILE: + *functionId = TSDB_FUNC_PERCT; + break; + case TK_APERCENTILE: + *functionId = TSDB_FUNC_APERCT; + break; + case TK_FIRST: + *functionId = TSDB_FUNC_FIRST; + break; + case TK_LAST: + *functionId = TSDB_FUNC_LAST; + break; + case TK_LEASTSQUARES: + *functionId = TSDB_FUNC_LEASTSQR; + break; + case TK_TOP: + *functionId = TSDB_FUNC_TOP; + break; + case TK_BOTTOM: + *functionId = TSDB_FUNC_BOTTOM; + break; + case TK_DIFF: + *functionId = TSDB_FUNC_DIFF; + break; + case TK_SPREAD: + *functionId = TSDB_FUNC_SPREAD; + break; + case TK_WAVG: + *functionId = TSDB_FUNC_WAVG; + break; + case TK_INTERP: + *functionId = TSDB_FUNC_INTERP; + break; + case TK_LAST_ROW: + *functionId = TSDB_FUNC_LAST_ROW; + break; + default: + return -1; + } + + return TSDB_CODE_SUCCESS; +} + +// TODO support like for showing metrics, there are show meters with like ops +int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { + SSqlCmd* pCmd = &pSql->cmd; + pCmd->command = TSDB_SQL_SHOW; + int8_t type = pInfo->sqlType; + + char msg[] = "database name too long"; + char msg1[] = "invalid database name"; + char msg2[] = "pattern filter string too long"; + + switch (type) { + case SHOW_VGROUPS: + pCmd->type = TSDB_MGMT_TABLE_VGROUP; + break; + case SHOW_TABLES: + pCmd->type = TSDB_MGMT_TABLE_TABLE; + break; + case SHOW_STABLES: + pCmd->type = TSDB_MGMT_TABLE_METRIC; + break; + + case SHOW_DATABASES: + pCmd->type = TSDB_MGMT_TABLE_DB; + break; + case SHOW_DNODES: + pCmd->type = TSDB_MGMT_TABLE_PNODE; + break; + case SHOW_USERS: + pCmd->type = TSDB_MGMT_TABLE_USER; + break; + case SHOW_CONNECTIONS: + pCmd->type = TSDB_MGMT_TABLE_CONNS; + break; + case SHOW_QUERIES: + pCmd->type = TSDB_MGMT_TABLE_QUERIES; + break; + case SHOW_STREAMS: + pCmd->type = TSDB_MGMT_TABLE_STREAMS; + break; + default: + return TSDB_CODE_INVALID_SQL; + } + + /* + * database prefix in pInfo->pDCLInfo->a[0] + * wildcard in like clause in pInfo->pDCLInfo->a[1] + */ + if (type == SHOW_TABLES || type == SHOW_STABLES || type == SHOW_VGROUPS) { + // db prefix in tagCond, show table conds in payload + if (pInfo->pDCLInfo->nTokens > 0) { + SSQLToken* pDbPrefixToken = &pInfo->pDCLInfo->a[0]; + + if (pDbPrefixToken->n > TSDB_DB_NAME_LEN) { // db name is too long + setErrMsg(pCmd, msg, tListLen(msg)); + 
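+ // the db prefix of a SHOW statement is bounded by TSDB_DB_NAME_LEN, the same limit as a regular database name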
return TSDB_CODE_INVALID_SQL; + } + + if (pDbPrefixToken->n > 0 && tscValidateName(pDbPrefixToken) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t ret = 0; + if (pDbPrefixToken->n > 0) { // has db prefix + if (pCmd->tagCond.allocSize < TSDB_MAX_TAGS_LEN) { + pCmd->tagCond.pData = realloc(pCmd->tagCond.pData, TSDB_MAX_TAGS_LEN); + pCmd->tagCond.allocSize = TSDB_MAX_TAGS_LEN; + } + ret = setObjFullName(pCmd->tagCond.pData, getAccountId(pSql), pDbPrefixToken, NULL, &pCmd->tagCond.len); + } else { + ret = setObjFullName(pCmd->name, getAccountId(pSql), NULL, NULL, NULL); + } + + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (type != SHOW_VGROUPS && pInfo->pDCLInfo->nTokens == 2) { + // set the like conds for show tables + SSQLToken* likeToken = &pInfo->pDCLInfo->a[1]; + strncpy(pCmd->payload, likeToken->z, likeToken->n); + strdequote(pCmd->payload); + pCmd->payloadLen = strlen(pCmd->payload); + + if (pCmd->payloadLen > TSDB_METER_NAME_LEN) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; // wildcard is too long + } + } + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { + SSqlCmd* pCmd = &pSql->cmd; + + switch (pInfo->sqlType) { + case KILL_QUERY: + pCmd->command = TSDB_SQL_KILL_QUERY; + break; + case KILL_STREAM: + pCmd->command = TSDB_SQL_KILL_STREAM; + break; + case KILL_CONNECTION: + pCmd->command = TSDB_SQL_KILL_CONNECTION; + break; + default: + return TSDB_CODE_INVALID_SQL; + } + + SSQLToken* pToken = &(pInfo->pDCLInfo->a[0]); + if (pToken->n > TSDB_KILL_MSG_LEN) { + return TSDB_CODE_INVALID_SQL; + } + + strncpy(pCmd->payload, pToken->z, pToken->n); + + const char delim = ':'; + char* ipStr = strtok(pToken->z, &delim); + char* portStr = strtok(NULL, &delim); + + if (!validateIpAddress(ipStr)) { + memset(pCmd->payload, 0, tListLen(pCmd->payload)); + + char msg[] = "invalid ip address"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t port = strtol(portStr, NULL, 10); + if (port <= 0 || port > 65535) { + memset(pCmd->payload, 0, tListLen(pCmd->payload)); + + char msg[] = "invalid port"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + +void setErrMsg(SSqlCmd* pCmd, char* pzErrMsg, int32_t maxLen) { + int32_t validLen = (maxLen > pCmd->allocSize) ? 
pCmd->allocSize : maxLen; + strncpy(pCmd->payload, pzErrMsg, validLen); + pCmd->payload[validLen - 1] = 0; +} + +bool validateIpAddress(char* ip) { + in_addr_t ipAddr = inet_addr(ip); + return (ipAddr != 0) && (ipAddr != 0xffffffff); +} + +void tscTansformSQLFunctionForMetricQuery(SSqlCmd* pCmd) { + if (pCmd->pMeterMeta == NULL || !UTIL_METER_IS_METRIC(pCmd)) { + return; + } + + assert(pCmd->pMeterMeta->numOfTags >= 0); + + int16_t bytes = 0; + int16_t type = 0; + + for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, k); + TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, k); + + int16_t functionId = aAggs[pExpr->sqlFuncId].stableFuncId; + + if (functionId >= TSDB_FUNC_SUM_DST && functionId <= TSDB_FUNC_APERCT_DST) { + getResultInfo(pField->type, pField->bytes, functionId, pExpr->param[0].i64Key, &type, &bytes); + tscSqlExprUpdate(pCmd, k, functionId, pExpr->colInfo.colIdx, TSDB_DATA_TYPE_BINARY, bytes); + } + } + + tscFieldInfoRenewOffsetForInterResult(pCmd); +} + +/* transfer the field-info back to original input format */ +void tscRestoreSQLFunctionForMetricQuery(SSqlCmd* pCmd) { + if (!UTIL_METER_IS_METRIC(pCmd)) { + return; + } + + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, i); + + if (pExpr->sqlFuncId >= TSDB_FUNC_SUM_DST && pExpr->sqlFuncId <= TSDB_FUNC_WAVG_DST) { + pExpr->resBytes = pField->bytes; + pExpr->resType = pField->type; + } + } +} + +bool onlyQueryMetricTags(SSqlCmd* pCmd) { + assert(pCmd->metricQuery == 1); + + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + if (tscSqlExprGet(pCmd, i)->sqlFuncId != TSDB_FUNC_TAGPRJ) { // 18 == "tagprj" function + return false; + } + } + + return true; +} + +bool hasUnsupportFunctionsForMetricQuery(SSqlCmd* pCmd) { + // filter sql function not supported by metric query yet. + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pCmd, i)->sqlFuncId; + if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_METRIC) == 0) { + return true; + } + } + return false; +} + +static bool functionCompatibleCheck(SSqlCmd* pCmd) { + int32_t startIdx = 0; + int32_t functionID = tscSqlExprGet(pCmd, startIdx)->sqlFuncId; + if (functionID == TSDB_FUNC_TS || functionID == TSDB_FUNC_TS_DUMMY) { + startIdx++; // ts function can be simultaneously used with any other + // functions. 
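+ // skipping the leading ts/ts_dummy column lets the loop below compare the compatibility
+ // class of every remaining output function against that of the first real function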
+ } + + int32_t nRetCount = funcCompatList[tscSqlExprGet(pCmd, startIdx)->sqlFuncId]; + + // diff function cannot be executed with other function + // arithmetic function can be executed with other arithmetic functions + for (int32_t i = startIdx + 1; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + if (funcCompatList[pExpr->sqlFuncId] != nRetCount) { + return false; + } + } + + return true; +} + +void updateTagColumnIndex(SSqlCmd* pCmd) { + // update tags column index for group by tags + for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupbyCols; ++i) { + int32_t index = pCmd->groupbyExpr.tagIndex[i]; + + for (int32_t j = 0; j < pCmd->numOfReqTags; ++j) { + int32_t tagColIndex = pCmd->tagColumnIndex[j]; + if (tagColIndex == index) { + pCmd->groupbyExpr.tagIndex[i] = j; + break; + } + } + } + + // update tags column index for expression + for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + + // not tags, continue + if (!pExpr->colInfo.isTag) { + continue; + } + + for (int32_t j = 0; j < pCmd->numOfReqTags; ++j) { + if (pExpr->colInfo.colIdx == pCmd->tagColumnIndex[j]) { + pExpr->colInfo.colIdx = j; + break; + } + } + } +} + +int32_t setGroupByClause(SSqlCmd* pCmd, tVariantList* pList) { + char msg1[] = "too many columns in group by clause"; + char msg2[] = "invalid column name in group by clause"; + char msg3[] = "functions are not available in group by query"; + char msg4[] = "group by only available for stable query"; + + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { + if (pList == NULL) { + return TSDB_CODE_SUCCESS; + } else { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } + } + + if (pList == NULL) { + return TSDB_CODE_SUCCESS; + } + + pCmd->groupbyExpr.numOfGroupbyCols = pList->nExpr; + if (pList->nExpr > TSDB_MAX_TAGS) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + SSchema* pSchema = tsGetSchema(pMeterMeta); + + int32_t numOfReqTags = 0; + + for (int32_t i = 0; i < pList->nExpr; ++i) { + tVariant* pVar = &pList->a[i].pVar; + SSQLToken token = {pVar->nLen, pVar->nType, pVar->pz}; + + int32_t colIdx = 0; + int16_t type = 0; + int16_t bytes = 0; + char* name = NULL; + + /* group by tbname*/ + if (strncasecmp(pVar->pz, TSQL_TBNAME_L, pVar->nLen) == 0) { + colIdx = -1; + type = TSDB_DATA_TYPE_BINARY; + bytes = TSDB_METER_NAME_LEN; + name = TSQL_TBNAME_L; + } else { + colIdx = getColumnIndexByName(&token, pSchema, pMeterMeta->numOfTags + pMeterMeta->numOfColumns); + if (colIdx < pMeterMeta->numOfColumns || colIdx > TSDB_MAX_TAGS + pMeterMeta->numOfColumns) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + type = pSchema[colIdx].type; + bytes = pSchema[colIdx].bytes; + name = pSchema[colIdx].name; + numOfReqTags++; + } + + SSqlExpr* pExpr = tscSqlExprInsert(pCmd, pCmd->fieldsInfo.numOfOutputCols, TSDB_FUNC_TAG, colIdx, type, bytes); + pExpr->colInfo.isTag = true; + + // NOTE: tag column does not add to source column list + SColumnList ids = {0}; + insertResultField(pCmd, pCmd->fieldsInfo.numOfOutputCols, &ids, bytes, type, name); + + int32_t relIndex = 0; + if (colIdx != -1) { + relIndex = colIdx - pMeterMeta->numOfColumns; + } else { // tbname + relIndex = colIdx; + } + + pExpr->colInfo.colIdx = relIndex; + pCmd->groupbyExpr.tagIndex[i] = relIndex; + + assert(pCmd->groupbyExpr.tagIndex[i] >= -1); + addRequiredTagColumn(pCmd, pCmd->groupbyExpr.tagIndex[i]); + } 
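+ // at this point every GROUP BY column has been registered as a TSDB_FUNC_TAG output
+ // expression and added to the required tag column list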
+ + /* + * check all query functions in selection clause, multi-output functions are not available in query function + */ + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + int32_t functId = tscSqlExprGet(pCmd, i)->sqlFuncId; + if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP_DST && functId != TSDB_FUNC_BOTTOM_DST && + functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + if (functId == TSDB_FUNC_COUNT && tscSqlExprGet(pCmd, i)->colInfo.colIdx == -1) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + } + + return TSDB_CODE_SUCCESS; +} + +void setColumnOffsetValueInResultset(SSqlCmd* pCmd) { + if (pCmd->metricQuery == 0) { + tscFieldInfoCalOffset(pCmd); + } else { + tscFieldInfoRenewOffsetForInterResult(pCmd); + } +} + +static void setColumnFilterInfo(SSqlCmd* pCmd, SColumnBase* pColFilter, int32_t colIdx, tSQLExpr* pExpr) { + tSQLExpr* pRight = pExpr->pRight; + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + + pColFilter->filterOn = 1; + + pColFilter->colIndex = colIdx; + + int16_t colType = pSchema[colIdx].type; + if ((colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) || colType == TSDB_DATA_TYPE_TIMESTAMP) { + colType = TSDB_DATA_TYPE_BIGINT; + } else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) { + colType = TSDB_DATA_TYPE_DOUBLE; + } + + if (pExpr->nSQLOptr == TK_LE || pExpr->nSQLOptr == TK_LT) { + tVariantDump(&pRight->val, (char*)&pColFilter->upperBndd, colType); + } else { // TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColFilter->lowerBndd + if (colType == TSDB_DATA_TYPE_BINARY) { + pColFilter->pz = (int64_t)malloc(pRight->val.nLen + 1); + pColFilter->len = pRight->val.nLen; + + tVariantDump(&pRight->val, (char*)pColFilter->pz, colType); + ((char*)pColFilter->pz)[pColFilter->len] = 0; + } else { + tVariantDump(&pRight->val, (char*)&pColFilter->lowerBndd, colType); + } + } + + switch (pExpr->nSQLOptr) { + case TK_LE: + pColFilter->upperRelOptr = TSDB_RELATION_LESS_EQUAL; + break; + case TK_LT: + pColFilter->upperRelOptr = TSDB_RELATION_LESS; + break; + case TK_GT: + pColFilter->lowerRelOptr = TSDB_RELATION_LARGE; + break; + case TK_GE: + pColFilter->lowerRelOptr = TSDB_RELATION_LARGE_EQUAL; + break; + case TK_EQ: + pColFilter->lowerRelOptr = TSDB_RELATION_EQUAL; + break; + case TK_NE: + pColFilter->lowerRelOptr = TSDB_RELATION_NOT_EQUAL; + break; + case TK_LIKE: + pColFilter->lowerRelOptr = TSDB_RELATION_LIKE; + break; + } +} + +static int32_t getTimeRange(int64_t* stime, int64_t* etime, tSQLExpr* pRight, int32_t optr, int16_t precision); + +static int32_t exprToString(tSQLExpr* pExpr, char** exprString, SColumnIdList* pIdList) { + if (pExpr->nSQLOptr == TK_ID) { // column name + strncpy(*exprString, pExpr->colInfo.z, pExpr->colInfo.n); + *exprString += pExpr->colInfo.n; + + if (pIdList) { + bool validColumn = false; + // record and check the column name + for (int32_t i = 0; i < pIdList->numOfCols; ++i) { + int32_t len = strlen(pIdList->pSchema[i].name); + + if (pExpr->colInfo.n == len && strncasecmp(pExpr->colInfo.z, pIdList->pSchema[i].name, len) == 0) { + pIdList->ids[pIdList->numOfRecordedCols++] = (int16_t)i; + validColumn = true; + break; + } + } + + if (!validColumn) { + return TSDB_CODE_INVALID_SQL; + } + } + } else if (pExpr->nSQLOptr >= TK_BOOL && pExpr->nSQLOptr <= TK_STRING) { // value + *exprString += tVariantToString(&pExpr->val, *exprString); + } else { + return 
TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t optrToString(tSQLExpr* pExpr, char** exprString) { + char le[] = "<="; + char ge[] = ">="; + char ne[] = "<>"; + char likeOptr[] = "LIKE"; + + switch (pExpr->nSQLOptr) { + case TK_LE: { + *(int16_t*)(*exprString) = *(int16_t*)le; + *exprString += 1; + break; + } + case TK_GE: { + *(int16_t*)(*exprString) = *(int16_t*)ge; + *exprString += 1; + break; + } + case TK_NE: { + *(int16_t*)(*exprString) = *(int16_t*)ne; + *exprString += 1; + break; + } + + case TK_LT: + *(*exprString) = '<'; + break; + case TK_GT: + *(*exprString) = '>'; + break; + case TK_EQ: + *(*exprString) = '='; + break; + case TK_PLUS: + *(*exprString) = '+'; + break; + case TK_MINUS: + *(*exprString) = '-'; + break; + case TK_STAR: + *(*exprString) = '*'; + break; + case TK_DIVIDE: + *(*exprString) = '/'; + break; + case TK_REM: + *(*exprString) = '%'; + break; + case TK_LIKE: { + int32_t len = sprintf(*exprString, " %s ", likeOptr); + *exprString += (len - 1); + break; + } + default: + return TSDB_CODE_INVALID_SQL; + } + + *exprString += 1; + + return TSDB_CODE_SUCCESS; +} + +static int32_t createTableNameList(tSQLExpr* pExpr, char** queryStr) { + tSQLExprList* pList = pExpr->pParam; + if (pList->nExpr <= 0) { + return TSDB_CODE_INVALID_SQL; + } + + int32_t len = 0; + for (int32_t i = 0; i < pList->nExpr; ++i) { + tSQLExpr* pSub = pList->a[i].pNode; + strncpy(*queryStr + len, pSub->val.pz, pSub->val.nLen); + + len += pSub->val.nLen; + (*queryStr)[len++] = ','; + + if (pSub->val.nLen <= 0 || pSub->val.nLen > TSDB_METER_NAME_LEN) { + return TSDB_CODE_INVALID_SQL; + } + } + + *queryStr += len; + + return TSDB_CODE_SUCCESS; +} + +static bool isTbnameToken(SSQLToken* token) { + return (strncasecmp(TSQL_TBNAME_L, token->z, token->n) == 0 && token->n == strlen(TSQL_TBNAME_L)); +} + +static int32_t buildTagQueryCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** queryStr) { + tSQLExpr* pLeft = pExpr->pLeft; + tSQLExpr* pRight = pExpr->pRight; + + char msg0[] = "invalid table name list"; + char msg1[] = "like operation is not allowed on numeric tags"; + char msg2[] = "in and query condition cannot be mixed up"; + STagCond* pCond = &pCmd->tagCond; + + if (pExpr->nSQLOptr == TK_IN && pRight->nSQLOptr == TK_SET) { + /* table name array list, invoke another routine */ + if (pCond->type == TSQL_STABLE_QTYPE_COND) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + pCond->type = TSQL_STABLE_QTYPE_SET; + + if (!isTbnameToken(&pLeft->colInfo)) { + return TSDB_CODE_INVALID_SQL; + } + + int32_t ret = createTableNameList(pRight, queryStr); + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg0, tListLen(msg0)); + } + return ret; + } + + // already use IN predicates + if (pCond->type == TSQL_STABLE_QTYPE_SET) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } else { + pCond->type = TSQL_STABLE_QTYPE_COND; + } + + *(*queryStr) = '('; + *queryStr += 1; + + exprToString(pLeft, queryStr, NULL); + if (optrToString(pExpr, queryStr) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + /* pattern string is too long */ + if (pExpr->nSQLOptr == TK_LIKE) { + if (pRight->val.nLen > TSDB_PATTERN_STRING_MAX_LEN) { + return TSDB_CODE_INVALID_SQL; + } + + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + + int32_t numOfCols = pCmd->pMeterMeta->numOfColumns; + int32_t numOfTags = pCmd->pMeterMeta->numOfTags; + + int32_t colIdx = getColumnIndexByName(&pLeft->colInfo, pSchema, numOfCols + numOfTags); + if 
((!isTbnameToken(&pLeft->colInfo)) && pSchema[colIdx].type != TSDB_DATA_TYPE_BINARY && + pSchema[colIdx].type != TSDB_DATA_TYPE_NCHAR) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + } + + exprToString(pRight, queryStr, NULL); + + *(*queryStr) = ')'; + *queryStr += 1; + + return TSDB_CODE_SUCCESS; +} + +// todo error handle / such as and /or mixed with +/-/*/ +int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString, SColumnIdList* colIdList) { + tSQLExpr* pLeft = pExpr->pLeft; + tSQLExpr* pRight = pExpr->pRight; + + *(*exprString) = '('; + *exprString += 1; + + if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) { + buildArithmeticExprString(pLeft, exprString, colIdList); + } else { + int32_t ret = exprToString(pLeft, exprString, colIdList); + if (ret != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + } + + optrToString(pExpr, exprString); + + if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) { + buildArithmeticExprString(pRight, exprString, colIdList); + } else { + int32_t ret = exprToString(pRight, exprString, colIdList); + if (ret != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + } + + *(*exprString) = ')'; + *exprString += 1; + + return TSDB_CODE_SUCCESS; +} + +static int32_t validateSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols) { + if (pExpr->nSQLOptr == TK_ID) { + bool validColumnName = false; + for (int32_t i = 0; i < numOfCols; ++i) { + if (strncasecmp(pExpr->colInfo.z, pSchema[i].name, pExpr->colInfo.n) == 0 && + pExpr->colInfo.n == strlen(pSchema[i].name)) { + if (pSchema[i].type < TSDB_DATA_TYPE_TINYINT || pSchema[i].type > TSDB_DATA_TYPE_DOUBLE) { + return TSDB_CODE_INVALID_SQL; + } + validColumnName = true; + } + } + + if (!validColumnName) { + return TSDB_CODE_INVALID_SQL; + } + + } else if (pExpr->nSQLOptr == TK_FLOAT && (isnan(pExpr->val.dKey) || isinf(pExpr->val.dKey))) { + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SSchema* pSchema, int32_t numOfCols) { + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + + tSQLExpr* pLeft = pExpr->pLeft; + if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) { + int32_t ret = validateArithmeticSQLExpr(pLeft, pSchema, numOfCols); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } else { + int32_t ret = validateSQLExpr(pLeft, pSchema, numOfCols); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } + + tSQLExpr* pRight = pExpr->pRight; + if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) { + int32_t ret = validateArithmeticSQLExpr(pRight, pSchema, numOfCols); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } else { + int32_t ret = validateSQLExpr(pRight, pSchema, numOfCols); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + } + + return TSDB_CODE_SUCCESS; +} + +static bool isValidExpr(tSQLExpr* pLeft, tSQLExpr* pRight, int32_t optr) { + if (pLeft == NULL || (pRight == NULL && optr != TK_IN)) { + return false; + } + + /* + * filter illegal expression in where clause: + * 1. columnA = columnB + * 2. count(*) > 12 + * 3. sum(columnA) > sum(columnB) + * 4. 
4 < 5, 'ABC'>'abc' + * + * However, columnA < 4+12 is valid + */ + if ((pLeft->nSQLOptr == TK_ID && pRight->nSQLOptr == TK_ID) || + (pLeft->nSQLOptr >= TK_COUNT && pLeft->nSQLOptr <= TK_WAVG) || + (pRight->nSQLOptr >= TK_COUNT && pRight->nSQLOptr <= TK_WAVG) || + (pLeft->nSQLOptr >= TK_BOOL && pLeft->nSQLOptr <= TK_BINARY && pRight->nSQLOptr >= TK_BOOL && + pRight->nSQLOptr <= TK_BINARY)) { + return false; + } + + return true; +} + +static int32_t getColumnFilterInfo(SSqlCmd* pCmd, int32_t colIdx, tSQLExpr* pExpr) { + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + SSchema* pSchema = tsGetSchema(pMeterMeta); + + char msg[] = "nchar column not available for filter"; + char msg1[] = "non binary column not support like operator"; + char msg2[] = "binary column not support this operator"; + char msg3[] = "column not support in operator"; + + if (pSchema[colIdx].type == TSDB_DATA_TYPE_NCHAR) { + setErrMsg(pCmd, msg, tListLen(msg)); + return -1; + } + + if (pExpr->nSQLOptr == TK_IN) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return -1; + } + + SColumnBase* pColFilter = tscColumnInfoInsert(pCmd, colIdx); + + pColFilter->filterOnBinary = ((pSchema[colIdx].type == TSDB_DATA_TYPE_BINARY) ? 1 : 0); + + if (pColFilter->filterOnBinary) { + if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE && pExpr->nSQLOptr != TK_LIKE) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + } else { + if (pExpr->nSQLOptr == TK_LIKE) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + } + + setColumnFilterInfo(pCmd, pColFilter, colIdx, pExpr); + return TSDB_CODE_SUCCESS; +} + +static int32_t handleExprInQueryCond(SSqlCmd* pCmd, bool* queryTimeRangeIsSet, char** queryStr, int64_t* stime, + int64_t* etime, tSQLExpr* pExpr) { + tSQLExpr* pLeft = pExpr->pLeft; + tSQLExpr* pRight = pExpr->pRight; + + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + SSchema* pSchema = tsGetSchema(pMeterMeta); + + int32_t numOfCols = pMeterMeta->numOfColumns; + int32_t numOfTags = pMeterMeta->numOfTags; + + char msg[] = "meter query cannot use tags filter"; + char msg1[] = "illegal column name"; + char msg2[] = "invalid timestamp"; + + int32_t colIdx = getColumnIndexByName(&pLeft->colInfo, pSchema, numOfCols + numOfTags); + bool istbname = isTbnameToken(&pLeft->colInfo); + + if (colIdx < 0 && (!istbname)) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + if (colIdx == 0) { // query on time range + *queryTimeRangeIsSet = true; + if (getTimeRange(stime, etime, pRight, pExpr->nSQLOptr, pMeterMeta->precision) != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + } else if (colIdx >= numOfCols || istbname) { // query on tags + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + return buildTagQueryCondString(pCmd, pExpr, queryStr); + } else { // query on other columns + return getColumnFilterInfo(pCmd, colIdx, pExpr); + } + + return TSDB_CODE_SUCCESS; +} + +static void insertLeftParentheses(char** queryStr, char* p) { + int32_t len = (*queryStr - p); + memmove(p + 1, p, len); + p[0] = '('; + *queryStr += 1; +} + +static void removeLeftParentheses(char** queryStr, char* p) { + // remove the left parentheses + memmove(p, p + 1, *queryStr - p - 1); + *queryStr -= 1; +} + +int32_t getQueryCondExprImpl(SSqlCmd* pCmd, tSQLExpr* pExpr, int64_t* stime, int64_t* etime, bool* queryTimeRangeIsSet, + char** queryStr) { + if (pExpr == NULL) { + return 
TSDB_CODE_SUCCESS; + } + + tSQLExpr* pLeft = pExpr->pLeft; + tSQLExpr* pRight = pExpr->pRight; + + if (!isValidExpr(pLeft, pRight, pExpr->nSQLOptr)) { + return TSDB_CODE_INVALID_SQL; + } + + if (pExpr->nSQLOptr == TK_AND || pExpr->nSQLOptr == TK_OR) { + int64_t stime1 = 0, etime1 = INT64_MAX; + bool tmRangeIsSet = false; + + char* p = *queryStr; + int32_t ret = getQueryCondExprImpl(pCmd, pExpr->pLeft, &stime1, &etime1, &tmRangeIsSet, queryStr); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (tmRangeIsSet) { + *stime = stime1; + *etime = etime1; + + *queryTimeRangeIsSet = true; + } + + if (p == *queryStr) { + /* + * query on timestamp or filter on normal columns + * no data serialize to string + * + * do nothing + */ + } else { // serialize relational operator for tag filter operation + if (pCmd->tagCond.type == TSQL_STABLE_QTYPE_SET) { + /* using id in clause, and/or is not needed */ + + } else { + assert(pCmd->tagCond.type == TSQL_STABLE_QTYPE_COND); + insertLeftParentheses(queryStr, p); + + char* optr = (pExpr->nSQLOptr == TK_AND) ? "and" : "or"; + int32_t len = (pExpr->nSQLOptr == TK_AND) ? 3 : 2; + strcpy(*queryStr, optr); + + *queryStr += len; + } + } + + int64_t stime2 = 0, etime2 = INT64_MAX; + tmRangeIsSet = false; + char* p2 = *queryStr; + ret = getQueryCondExprImpl(pCmd, pExpr->pRight, &stime2, &etime2, &tmRangeIsSet, queryStr); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (tmRangeIsSet) { + *queryTimeRangeIsSet = true; + if (pExpr->nSQLOptr == TK_AND) { + *stime = stime2 > (*stime) ? stime2 : (*stime); + *etime = etime2 < (*etime) ? etime2 : (*etime); + + } else { + char msg1[] = "not support multi-segments query time ranges"; + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + } + + if (p != *queryStr) { // either the left and right hand side has tags + // filter + if (p2 == *queryStr && p != p2 && pCmd->tagCond.type == TSQL_STABLE_QTYPE_COND) { + /* + * has no tags filter info on the right hand side + * has filter on the left hand side + * + * rollback string + */ + int32_t len = (pExpr->nSQLOptr == TK_AND) ? 3 : 2; + *queryStr -= len; + + removeLeftParentheses(queryStr, p); + } else if (p2 != *queryStr && p == p2) { + // do nothing + } else { + if (pCmd->tagCond.type == TSQL_STABLE_QTYPE_COND) { + *(*queryStr) = ')'; + *queryStr += 1; + } + } + } + + return TSDB_CODE_SUCCESS; + } + + if (pLeft->nSQLOptr == TK_ID && (pRight->nSQLOptr == TK_INTEGER || pRight->nSQLOptr == TK_FLOAT || + pRight->nSQLOptr == TK_STRING || pRight->nSQLOptr == TK_BOOL)) { + // do nothing + } else if (pRight->nSQLOptr == TK_ID && (pLeft->nSQLOptr == TK_INTEGER || pLeft->nSQLOptr == TK_FLOAT || + pLeft->nSQLOptr == TK_STRING || pLeft->nSQLOptr == TK_BOOL)) { + /* + * exchange value of the left-handside and the value of the right-handside + * to make sure that the value of filter expression always locates in right-handside and + * the column-id is at the left hande side. 
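+ * e.g., a filter written as "3 < col1" is rewritten here into "col1 > 3"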
+ */ + uint32_t optr = 0; + switch (pExpr->nSQLOptr) { + case TK_LE: + optr = TK_GE; + break; + case TK_LT: + optr = TK_GT; + break; + case TK_GT: + optr = TK_LT; + break; + case TK_GE: + optr = TK_LE; + break; + default: + optr = pExpr->nSQLOptr; + } + + pExpr->nSQLOptr = optr; + + tSQLExpr* pTmpExpr = pExpr->pLeft; + pExpr->pLeft = pExpr->pRight; + pExpr->pRight = pTmpExpr; + } + + return handleExprInQueryCond(pCmd, queryTimeRangeIsSet, queryStr, stime, etime, pExpr); +} + +int tableNameCompar(const void* lhs, const void* rhs) { + char* left = *(char**)lhs; + char* right = *(char**)rhs; + + int32_t ret = strcmp(left, right); + + if (ret == 0) { + return 0; + } + + return ret > 0 ? 1 : -1; +} + +static int32_t setMetersIDForMetricQuery(SSqlObj* pSql, char* tmpTagCondBuf) { + SSqlCmd* pCmd = &pSql->cmd; + char msg[] = "meter name too long"; + + pCmd->tagCond.allocSize = 4096; + pCmd->tagCond.pData = realloc(pCmd->tagCond.pData, pCmd->tagCond.allocSize); + + char db[TSDB_METER_ID_LEN] = {0}; + + /* remove the duplicated input table names */ + int32_t num = 0; + char** segments = strsplit(tmpTagCondBuf, ",", &num); + qsort(segments, num, POINTER_BYTES, tableNameCompar); + + int32_t j = 1; + for (int32_t i = 1; i < num; ++i) { + if (strcmp(segments[i], segments[i - 1]) != 0) { + segments[j++] = segments[i]; + } + } + num = j; + + extractDBName(pCmd->name, db); + SSQLToken tDB = { + .z = db, .n = strlen(db), .type = TK_STRING, + }; + + char* acc = getAccountId(pSql); + for (int32_t i = 0; i < num; ++i) { + if (pCmd->tagCond.allocSize - pCmd->tagCond.len < (TSDB_METER_ID_LEN + 1)) { + /* remain space is insufficient, buy more spaces */ + pCmd->tagCond.allocSize = (pCmd->tagCond.allocSize << 1); + pCmd->tagCond.pData = realloc(pCmd->tagCond.pData, pCmd->tagCond.allocSize); + } + + if (i >= 1) { + pCmd->tagCond.pData[pCmd->tagCond.len++] = ','; + } + + int32_t xlen = strlen(segments[i]); + + SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; + int32_t ret = setObjFullName(pCmd->tagCond.pData + pCmd->tagCond.len, acc, &tDB, &t, &xlen); + + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + tfree(segments); + return ret; + } + + pCmd->tagCond.len += xlen; + } + + tfree(segments); + return TSDB_CODE_SUCCESS; +} + +int32_t buildQueryCond(SSqlObj* pSql, tSQLExpr* pExpr) { + SSqlCmd* pCmd = &pSql->cmd; + + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + + char msg1[] = "invalid expression"; + char msg2[] = "meter is not allowed"; + + tSQLExpr* pLeft = pExpr->pLeft; + tSQLExpr* pRight = pExpr->pRight; + if (pLeft == NULL || pRight == NULL || (pLeft->nSQLOptr == TK_ID && pRight->nSQLOptr == TK_ID)) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + bool setTimeRange = false; + + /* tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space */ + char tmpTagCondBuf[TSDB_MAX_SQL_LEN] = {0}; + char* q = tmpTagCondBuf; + + pCmd->stime = 0; + pCmd->etime = INT64_MAX; + int32_t ret = getQueryCondExprImpl(pCmd, pExpr, &pCmd->stime, &pCmd->etime, &setTimeRange, &q); + if (ret != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // query condition for tags + if (q == tmpTagCondBuf) { + pCmd->tagCond.len = 0; + } else { + int32_t qlen = (q - tmpTagCondBuf) + 1; + tmpTagCondBuf[qlen - 1] = 0; + + if (pCmd->tagCond.type == TSQL_STABLE_QTYPE_SET) { + if (!UTIL_METER_IS_METRIC(pCmd)) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + ret = 
setMetersIDForMetricQuery(pSql, tmpTagCondBuf); + } else { + if (pCmd->tagCond.allocSize < qlen + 1) { + pCmd->tagCond.allocSize = qlen + 1; + pCmd->tagCond.pData = realloc(pCmd->tagCond.pData, pCmd->tagCond.allocSize); + } + + strcpy(pCmd->tagCond.pData, tmpTagCondBuf); + pCmd->tagCond.len = qlen; // plus one null-terminated symbol + } + + pCmd->tagCond.pData[pCmd->tagCond.len] = 0; + } + + return ret; +} + +int32_t getTimeRange(int64_t* stime, int64_t* etime, tSQLExpr* pRight, int32_t optr, int16_t timePrecision) { + assert(pRight->nSQLOptr == TK_INTEGER || pRight->nSQLOptr == TK_FLOAT || pRight->nSQLOptr == TK_STRING || + pRight->nSQLOptr == TK_TIMESTAMP); + + int64_t val = 0; + bool parsed = false; + if (pRight->val.nType == TSDB_DATA_TYPE_BINARY) { + strdequote(pRight->val.pz); + char* seg = strnchr(pRight->val.pz, '-', pRight->val.nLen); + if (seg != NULL) { + if (taosParseTime(pRight->val.pz, &val, pRight->val.nLen, TSDB_TIME_PRECISION_MICRO) == TSDB_CODE_SUCCESS) { + parsed = true; + } else { + return TSDB_CODE_INVALID_SQL; + } + } + } else if (pRight->nSQLOptr == TK_INTEGER && timePrecision == TSDB_TIME_PRECISION_MILLI) { + /* + * if the pRight->nSQLOptr == TK_INTEGER/TK_FLOAT, the value is adaptive, we + * need the time precision of metermeta to transfer the value in MICROSECOND + * + * Additional check to avoid data overflow + */ + if (pRight->val.i64Key <= INT64_MAX / 1000) { + pRight->val.i64Key *= 1000; + } + } else if (pRight->nSQLOptr == TK_FLOAT && timePrecision == TSDB_TIME_PRECISION_MILLI) { + pRight->val.dKey *= 1000; + } + + if (!parsed) { + /* + * failed to parse timestamp in regular formation, try next + * it may be a epoch time in string format + */ + tVariantDump(&pRight->val, (char*)&val, TSDB_DATA_TYPE_BIGINT); + + /* + * transfer it into MICROSECOND format if it is a string, since for + * TK_INTEGER/TK_FLOAT the value has been transferred + * + * additional check to avoid data overflow + */ + if (pRight->nSQLOptr == TK_STRING && timePrecision == TSDB_TIME_PRECISION_MILLI) { + if (val <= INT64_MAX / 1000) { + val *= 1000; + } + } + } + + int32_t delta = 1; + /* for millisecond, delta is 1ms=1000us */ + if (timePrecision == TSDB_TIME_PRECISION_MILLI) { + delta *= 1000; + } + + if (optr == TK_LE) { + *etime = val; + } else if (optr == TK_LT) { + *etime = val - delta; + } else if (optr == TK_GT) { + *stime = val + delta; + } else if (optr == TK_GE) { + *stime = val; + } else if (optr == TK_EQ) { + *stime = val; + *etime = *stime; + } + return TSDB_CODE_SUCCESS; +} + +int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + char* fieldName = tscFieldInfoGetField(pCmd, i)->name; + for (int32_t j = 0; j < TSDB_COL_NAME_LEN && fieldName[j] != 0; ++j) { + if (fieldName[j] == '(' || fieldName[j] == ')' || fieldName[j] == '*' || fieldName[j] == ',' || + fieldName[j] == '.' 
|| fieldName[j] == '/' || fieldName[j] == '+' || fieldName[j] == '-' || + fieldName[j] == ' ') { + fieldName[j] = '_'; + } + } + fieldName[TSDB_COL_NAME_LEN - 1] = 0; + } + + // the column name may be identical, here check again + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + char* fieldName = tscFieldInfoGetField(pCmd, i)->name; + for (int32_t j = i + 1; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { + if (strncasecmp(fieldName, tscFieldInfoGetField(pCmd, j)->name, TSDB_COL_NAME_LEN) == 0) { + char msg[] = "duplicated column name in new table"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t setFillPolicy(SSqlCmd* pCmd, SQuerySQL* pQuerySQL) { + tVariantList* pFillToken = pQuerySQL->fillType; + tVariantListItem* pItem = &pFillToken->a[0]; + + const int32_t START_INTERPO_COL_IDX = 1; + char msg[] = "illegal value or data overflow"; + char msg1[] = "value is expected"; + char msg2[] = "invalid fill option"; + + if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + if (strncasecmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) { + pCmd->interpoType = TSDB_INTERPO_NONE; + } else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) { + pCmd->interpoType = TSDB_INTERPO_NULL; + for (int32_t i = START_INTERPO_COL_IDX; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); + setNull((char*)&pCmd->defaultVal[i], pFields->type, pFields->bytes); + } + } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { + pCmd->interpoType = TSDB_INTERPO_PREV; + } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { + // not support yet + pCmd->interpoType = TSDB_INTERPO_LINEAR; + } else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) { + pCmd->interpoType = TSDB_INTERPO_SET_VALUE; + + if (pFillToken->nExpr == 1) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t startPos = 1; + int32_t numOfFillVal = pFillToken->nExpr - 1; + + /* for point interpolation query, we do not have the timestamp column */ + if (tscIsPointInterpQuery(pCmd)) { + startPos = 0; + + if (numOfFillVal > pCmd->fieldsInfo.numOfOutputCols) { + numOfFillVal = pCmd->fieldsInfo.numOfOutputCols; + } + } else { + numOfFillVal = + (pFillToken->nExpr > pCmd->fieldsInfo.numOfOutputCols) ? 
pCmd->fieldsInfo.numOfOutputCols : pFillToken->nExpr; + } + + int32_t j = 1; + + for (int32_t i = startPos; i < numOfFillVal; ++i, ++j) { + TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); + + int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pCmd->defaultVal[i], pFields->type); + if (ret != TSDB_CODE_SUCCESS) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { + setNull((char*)(&pCmd->defaultVal[i]), pFields->type, pFields->bytes); + } + } + + if ((pFillToken->nExpr < pCmd->fieldsInfo.numOfOutputCols) || + ((pFillToken->nExpr - 1 < pCmd->fieldsInfo.numOfOutputCols) && (tscIsPointInterpQuery(pCmd)))) { + tVariantListItem* lastItem = &pFillToken->a[pFillToken->nExpr - 1]; + + for (int32_t i = numOfFillVal; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD* pFields = tscFieldInfoGetField(pCmd, i); + tVariantDump(&lastItem->pVar, (char*)&pCmd->defaultVal[i], pFields->type); + + if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { + setNull((char*)(&pCmd->defaultVal[i]), pFields->type, pFields->bytes); + } + } + } + } else { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + +static void setDefaultOrderInfo(SSqlCmd* pCmd) { + /* set default timestamp order information for all queries */ + pCmd->order.order = TSQL_SO_ASC; + + if (isTopBottomQuery(pCmd)) { + pCmd->order.order = TSQL_SO_ASC; + pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } else { + pCmd->order.orderColId = -1; + } + + /* for metric query, set default ascending order for group output */ + if (UTIL_METER_IS_METRIC(pCmd)) { + pCmd->groupbyExpr.orderType = TSQL_SO_ASC; + } +} + +int32_t setOrderByClause(SSqlCmd* pCmd, SQuerySQL* pQuerySql, SSchema* pSchema, int32_t numOfCols) { + char msg[] = "only support order by primary timestamp"; + char msg3[] = "invalid column name"; + char msg5[] = "only support order by primary timestamp and queried column"; + char msg6[] = "only support order by primary timestamp and first tag in groupby clause"; + + setDefaultOrderInfo(pCmd); + + if (pQuerySql->pSortOrder == NULL) { + return TSDB_CODE_SUCCESS; + } + + tVariantList* pSortorder = pQuerySql->pSortOrder; + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { + if (pSortorder->nExpr > 1) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + } else { + if (pSortorder->nExpr > 2) { + setErrMsg(pCmd, msg6, tListLen(msg6)); + return TSDB_CODE_INVALID_SQL; + } + } + + // handle the first part of order by + tVariant* pVar = &pSortorder->a[0].pVar; + + // e.g., order by 1 asc, return directly with out further check. 
+ if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { + return TSDB_CODE_SUCCESS; + } + + SSQLToken columnName = {pVar->nLen, pVar->nType, pVar->pz}; + + if (UTIL_METER_IS_METRIC(pCmd)) { // metric query + SSchema* pTagSchema = tsGetTagSchema(pCmd->pMeterMeta); + int32_t columnIndex = getColumnIndexByName(&columnName, pTagSchema, pCmd->pMeterMeta->numOfTags); + bool orderByTags = false; + bool orderByTS = false; + if (pCmd->groupbyExpr.tagIndex[0] == columnIndex) { + if (columnIndex >= 0 || (strncasecmp(columnName.z, TSQL_TBNAME_L, 6) == 0)) { + orderByTags = true; + } + } + + columnIndex = getColumnIndexByName(&columnName, pSchema, numOfCols); + if (PRIMARYKEY_TIMESTAMP_COL_INDEX == columnIndex) { + orderByTS = true; + } + + if (!(orderByTags || orderByTS)) { + setErrMsg(pCmd, msg6, tListLen(msg6)); + return TSDB_CODE_INVALID_SQL; + } else { + assert(!(orderByTags && orderByTS)); + } + + if (pSortorder->nExpr == 1) { + if (orderByTags) { + pCmd->groupbyExpr.orderIdx = columnIndex; + pCmd->groupbyExpr.orderType = pQuerySql->pSortOrder->a[0].sortOrder; + } else { + pCmd->order.order = pSortorder->a[0].sortOrder; + pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } + } + + if (pSortorder->nExpr == 2) { + tVariant* pVar2 = &pSortorder->a[1].pVar; + SSQLToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; + columnIndex = getColumnIndexByName(&cname, pSchema, numOfCols); + if (columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return TSDB_CODE_INVALID_SQL; + } else { + pCmd->order.order = pSortorder->a[1].sortOrder; + pCmd->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } + } + + } else { // meter query + int32_t columnIndex = getColumnIndexByName(&columnName, pSchema, numOfCols); + if (columnIndex <= -1) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + if (columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pCmd)) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return TSDB_CODE_INVALID_SQL; + } + + if (isTopBottomQuery(pCmd) && pCmd->nAggTimeInterval >= 0) { + /* order of top/bottom query in interval is not valid */ + SSqlExpr* pExpr = tscSqlExprGet(pCmd, 0); + assert(pExpr->sqlFuncId == TSDB_FUNC_TS); + + pExpr = tscSqlExprGet(pCmd, 1); + if (pExpr->colInfo.colIdx != columnIndex && columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + pCmd->order.orderColId = pSchema[columnIndex].colId; + return TSDB_CODE_SUCCESS; + } + + pCmd->order.order = pQuerySql->pSortOrder->a[0].sortOrder; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { + SSqlCmd* pCmd = &pSql->cmd; + + SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo; + pCmd->command = TSDB_SQL_ALTER_TABLE; + + if (setMeterID(pSql, &(pAlterSQL->name)) != TSDB_CODE_SUCCESS) { + char msg[] = "table name too long"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t ret = tscGetMeterMeta(pSql, pCmd->name); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + SSchema* pSchema = tsGetSchema(pMeterMeta); + + if (pInfo->sqlType == ALTER_TABLE_TAGS_ADD || pInfo->sqlType == ALTER_TABLE_TAGS_DROP || + pInfo->sqlType == ALTER_TABLE_TAGS_CHG) { + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { + char msg[] = "manipulation of tag available for metric"; + 
setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + } else if ((pInfo->sqlType == ALTER_TABLE_TAGS_SET) && (UTIL_METER_IS_METRIC(pCmd))) { + char msg[] = "set tag value only available for table"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } else if ((pInfo->sqlType == ALTER_TABLE_ADD_COLUMN || pInfo->sqlType == ALTER_TABLE_DROP_COLUMN) && + UTIL_METER_IS_CREATE_FROM_METRIC(pCmd)) { + char msg[] = "column can only be modified by metric"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (pInfo->sqlType == ALTER_TABLE_TAGS_ADD) { + pCmd->count = TSDB_ALTER_TABLE_ADD_TAG_COLUMN; + + tFieldList* pFieldList = pAlterSQL->pAddColumns; + if (pFieldList->nField > 1) { + char msg[] = "only support add one tag"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (!validateOneTags(pCmd, &pFieldList->p[0])) { + return TSDB_CODE_INVALID_SQL; + } + + tscFieldInfoSetValFromField(&pCmd->fieldsInfo, 0, &pFieldList->p[0]); + pCmd->numOfCols = 1; // only one column + + } else if (pInfo->sqlType == ALTER_TABLE_TAGS_DROP) { + pCmd->count = TSDB_ALTER_TABLE_DROP_TAG_COLUMN; + + char msg1[] = "no tags can be dropped"; + char msg2[] = "only support one tag"; + char msg3[] = "tag name too long"; + char msg4[] = "illegal tag name"; + char msg5[] = "primary tag cannot be dropped"; + + if (pMeterMeta->numOfTags == 1) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + // numOfTags == 1 + if (pAlterSQL->varList->nExpr > 1) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + tVariantListItem* pItem = &pAlterSQL->varList->a[0]; + if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t idx = -1; + for (int32_t i = 0; i < pMeterMeta->numOfTags; ++i) { + int32_t tagIdx = i + pMeterMeta->numOfColumns; + char* tagName = pSchema[tagIdx].name; + size_t nLen = strlen(tagName); + + if ((strncasecmp(tagName, pItem->pVar.pz, nLen) == 0) && (pItem->pVar.nLen == nLen)) { + idx = i; + break; + } + } + + if (idx == -1) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } else if (idx == 0) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return TSDB_CODE_INVALID_SQL; + } + + char name[128] = {0}; + strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); + tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + + pCmd->numOfCols = 1; // only one column + + } else if (pInfo->sqlType == ALTER_TABLE_TAGS_CHG) { + pCmd->count = TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN; + tVariantList* pVarList = pAlterSQL->varList; + if (pVarList->nExpr > 2) { + return TSDB_CODE_INVALID_SQL; + } + + if (pVarList->a[0].pVar.nLen > TSDB_COL_NAME_LEN || pVarList->a[1].pVar.nLen > TSDB_COL_NAME_LEN) { + char msg[] = "tag name too long"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + tVariantListItem* pSrcItem = &pAlterSQL->varList->a[0]; + tVariantListItem* pDstItem = &pAlterSQL->varList->a[1]; + + bool srcFound = false; + bool dstFound = false; + for (int32_t i = 0; i < pMeterMeta->numOfTags; ++i) { + int32_t tagIdx = i + pMeterMeta->numOfColumns; + char* tagName = pSchema[tagIdx].name; + + size_t nameLen = strlen(tagName); + if ((!srcFound) && strncasecmp(tagName, pSrcItem->pVar.pz, nameLen) == 0) { + srcFound = true; + } + + if ((!dstFound) && strncasecmp(tagName, pDstItem->pVar.pz, nameLen) == 0) { + dstFound = 
true; + } + } + + if ((!srcFound) || dstFound) { + char msg[] = "invalid tag name"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + char name[128] = {0}; + strncpy(name, pVarList->a[0].pVar.pz, pVarList->a[0].pVar.nLen); + tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + + memset(name, 0, tListLen(name)); + strncpy(name, pVarList->a[1].pVar.pz, pVarList->a[1].pVar.nLen); + tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + pCmd->numOfCols = 2; + + } else if (pInfo->sqlType == ALTER_TABLE_TAGS_SET) { + pCmd->count = TSDB_ALTER_TABLE_UPDATE_TAG_VAL; + + // Note: update can only be applied to meter not metric. + // the following is handle display tags value for meters created according to metric + char* pTagValue = tsGetTagsValue(pCmd->pMeterMeta); + + tVariantList* pVarList = pAlterSQL->varList; + tVariant* pTagName = &pVarList->a[0].pVar; + + if (pTagName->nLen > TSDB_COL_NAME_LEN) { + char msg[] = "tag name too long"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t tagsIndex = -1; + SSchema* pTagsSchema = tsGetTagSchema(pCmd->pMeterMeta); + for (int32_t i = 0; i < pCmd->pMeterMeta->numOfTags; ++i) { + if (strcmp(pTagName->pz, pTagsSchema[i].name) == 0 && strlen(pTagsSchema[i].name) == pTagName->nLen) { + tagsIndex = i; + tVariantDump(&pVarList->a[1].pVar, pCmd->payload, pTagsSchema[i].type); + break; + } + + pTagValue += pTagsSchema[i].bytes; + } + + if (tagsIndex == -1) { + char msg[] = "invalid tag name"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + // validate the length of binary + if (pTagsSchema[tagsIndex].type == TSDB_DATA_TYPE_BINARY && + pVarList->a[1].pVar.nLen > pTagsSchema[tagsIndex].bytes) { + char msg[] = "tag value too long"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + char name[128] = {0}; + strncpy(name, pTagName->pz, pTagName->nLen); + tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + + pCmd->numOfCols = 1; + } else if (pInfo->sqlType == ALTER_TABLE_ADD_COLUMN) { + pCmd->count = TSDB_ALTER_TABLE_ADD_COLUMN; + + tFieldList* pFieldList = pAlterSQL->pAddColumns; + if (pFieldList->nField > 1) { + char msg[] = "only support add one column"; + setErrMsg(pCmd, msg, tListLen(msg)); + return TSDB_CODE_INVALID_SQL; + } + + if (!validateOneColumn(pCmd, &pFieldList->p[0])) { + return TSDB_CODE_INVALID_SQL; + } + + tscFieldInfoSetValFromField(&pCmd->fieldsInfo, 0, &pFieldList->p[0]); + pCmd->numOfCols = 1; // only one column + + } else if (pInfo->sqlType == ALTER_TABLE_DROP_COLUMN) { + pCmd->count = TSDB_ALTER_TABLE_DROP_COLUMN; + + char msg1[] = "no columns can be dropped"; + char msg2[] = "only support one column"; + char msg3[] = "column name too long"; + char msg4[] = "illegal column name"; + char msg5[] = "primary timestamp column cannot be dropped"; + + if (pMeterMeta->numOfColumns == TSDB_MIN_COLUMNS) { // + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + + if (pAlterSQL->varList->nExpr > 1) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + tVariantListItem* pItem = &pAlterSQL->varList->a[0]; + if (pItem->pVar.nLen > TSDB_COL_NAME_LEN) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + + int32_t idx = -1; + for (int32_t i = 0; i < pMeterMeta->numOfColumns; ++i) { + 
char* colName = pSchema[i].name; + size_t len = strlen(colName); + + if ((strncasecmp(colName, pItem->pVar.pz, len) == 0) && (len == pItem->pVar.nLen)) { + idx = i; + break; + } + } + + if (idx == -1) { + setErrMsg(pCmd, msg4, tListLen(msg4)); + return TSDB_CODE_INVALID_SQL; + } else if (idx == 0) { + setErrMsg(pCmd, msg5, tListLen(msg5)); + return TSDB_CODE_INVALID_SQL; + } + + char name[128] = {0}; + strncpy(name, pItem->pVar.pz, pItem->pVar.nLen); + tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_INT, name, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize); + + pCmd->numOfCols = 1; // only one column + } + + return TSDB_CODE_SUCCESS; +} + +int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd) { + char msg0[] = "sample interval can not be less than 10ms."; + char msg1[] = "functions not allowed in select clause"; + + if (pCmd->nAggTimeInterval != 0 && pCmd->nAggTimeInterval < 10) { + setErrMsg(pCmd, msg0, tListLen(msg0)); + return TSDB_CODE_INVALID_SQL; + } + + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + int32_t functId = tscSqlExprGet(pCmd, i)->sqlFuncId; + if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd) { + bool isProjectionFunction = false; + char msg[] = "column projection is not compatible with interval"; + + // multi-output set/ todo refactor + for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, k); + if (pExpr->sqlFuncId == TSDB_FUNC_PRJ || pExpr->sqlFuncId == TSDB_FUNC_TAGPRJ || + pExpr->sqlFuncId == TSDB_FUNC_DIFF || pExpr->sqlFuncId == TSDB_FUNC_ARITHM) { + isProjectionFunction = true; + } + } + if (pCmd->metricQuery == 0 || isProjectionFunction == true) { + setErrMsg(pCmd, msg, tListLen(msg)); + } + + return isProjectionFunction == true ? 
TSDB_CODE_INVALID_SQL : TSDB_CODE_SUCCESS; +} + +typedef struct SDNodeDynConfOption { + char* name; + int32_t len; +} SDNodeDynConfOption; + +int32_t validateDNodeConfig(tDCLSQL* pOptions) { + if (pOptions->nTokens < 2 || pOptions->nTokens > 3) { + return TSDB_CODE_INVALID_SQL; + } + + SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[13] = { + {"resetLog", 8}, {"resetQueryCache", 15}, {"dDebugFlag", 10}, {"taosDebugFlag", 13}, + {"tmrDebugFlag", 12}, {"cDebugFlag", 10}, {"uDebugFlag", 10}, {"mDebugFlag", 10}, + {"sdbDebugFlag", 12}, {"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"qDebugflag", 10}, + {"debugFlag", 9}}; + + SSQLToken* pOptionToken = &pOptions->a[1]; + + if (pOptions->nTokens == 2) { + // reset log and reset query cache does not need value + for (int32_t i = 0; i < 2; ++i) { + SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i]; + if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { + return TSDB_CODE_SUCCESS; + } + } + } else { + SSQLToken* pValToken = &pOptions->a[2]; + + int32_t val = strtol(pValToken->z, NULL, 10); + if (val < 131 || val > 199) { + /* options value is out of valid range */ + return TSDB_CODE_INVALID_SQL; + } + + for (int32_t i = 2; i < tListLen(DNODE_DYNAMIC_CFG_OPTIONS); ++i) { + SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i]; + + if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) { + /* options is valid */ + return TSDB_CODE_SUCCESS; + } + } + } + + return TSDB_CODE_INVALID_SQL; +} + +int32_t validateColumnName(char* name) { + bool ret = isKeyWord(name, strlen(name)); + if (ret) { + return TSDB_CODE_INVALID_SQL; + } + + SSQLToken token = { + .z = name, + }; + token.n = tSQLGetToken(name, &token.type); + + if (token.type != TK_STRING && token.type != TK_ID) { + return TSDB_CODE_INVALID_SQL; + } + + if (token.type == TK_STRING) { + strdequote(token.z); + strtrim(token.z); + token.n = (uint32_t)strlen(token.z); + + int32_t k = tSQLGetToken(token.z, &token.type); + if (k != token.n) { + return TSDB_CODE_INVALID_SQL; + } + + return validateColumnName(token.z); + } else { + if (isNumber(&token)) { + return TSDB_CODE_INVALID_SQL; + } + } + + return TSDB_CODE_SUCCESS; +} + +bool hasTimestampForPointInterpQuery(SSqlCmd* pCmd) { + if (!tscIsPointInterpQuery(pCmd)) { + return true; + } + + return (pCmd->stime == pCmd->etime) && (pCmd->stime != 0); +} + +int32_t setLimitOffsetValueInfo(SSqlObj* pSql, SQuerySQL* pQuerySql) { + SSqlCmd* pCmd = &pSql->cmd; + bool isMetric = UTIL_METER_IS_METRIC(pCmd); + + char msg0[] = "soffset can not be less than 0"; + char msg1[] = "offset can not be less than 0"; + char msg2[] = "slimit/soffset only available for stable query"; + char msg3[] = "function not supported on table"; + + // handle the limit offset value, validate the limit + pCmd->limit = pQuerySql->limit; + pCmd->glimit = pQuerySql->glimit; + + if (isMetric) { + bool queryOnTags = false; + int32_t ret = tscQueryOnlyMetricTags(pCmd, &queryOnTags); + if (ret != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + if (queryOnTags == true) { // local handle the metric tag query + pCmd->command = TSDB_SQL_RETRIEVE_TAGS; + } + + if (pCmd->glimit.limit == 0 || pCmd->limit.limit == 0) { + tscTrace("%p limit 0, no output result", pSql); + pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; + } + + if (pCmd->glimit.offset < 0) { + setErrMsg(pCmd, msg0, tListLen(msg0)); + return TSDB_CODE_INVALID_SQL; + } + + 
/* + * get the distribution of all meters among available vnodes that satisfy query condition from mnode, + * then launch multiple async queries on the referenced vnodes, which is the first-stage query operation + */ + int32_t code = tscGetMetricMeta(pSql, pCmd->name); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + /* + * The query result is empty. Therefore, the result is filled with 0 if the count function is employed in the selection clause. + * + * Filling the empty result is required only when the interval clause is absent. + */ + SMetricMeta* pMetricMeta = pCmd->pMetricMeta; + if (pCmd->pMeterMeta == NULL || pMetricMeta == NULL || pMetricMeta->numOfVnodes == 0 || + pMetricMeta->numOfMeters == 0) { + tscTrace("%p no table in metricmeta, no output result", pSql); + pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + } + + // keep the original limit value in globalLimit + pCmd->globalLimit = pCmd->limit.limit; + } else { + if (pCmd->glimit.limit != -1 || pCmd->glimit.offset != 0) { + setErrMsg(pCmd, msg2, tListLen(msg2)); + return TSDB_CODE_INVALID_SQL; + } + + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + if (pExpr->colInfo.colIdx == -1) { + setErrMsg(pCmd, msg3, tListLen(msg3)); + return TSDB_CODE_INVALID_SQL; + } + } + + if (pCmd->limit.limit == 0) { + tscTrace("%p limit 0, no output result", pSql); + pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + } + + if (pCmd->limit.offset < 0) { + setErrMsg(pCmd, msg1, tListLen(msg1)); + return TSDB_CODE_INVALID_SQL; + } + } + + return TSDB_CODE_SUCCESS; +} diff --git a/src/client/src/tscSQLParserImpl.c b/src/client/src/tscSQLParserImpl.c new file mode 100644 index 000000000000..2dba17e938cd --- /dev/null +++ b/src/client/src/tscSQLParserImpl.c @@ -0,0 +1,678 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "tglobalcfg.h" +#include "tsql.h" +#include "tstoken.h" +#include "ttime.h" +#include "tutil.h" + +int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) { + void *pParser = ParseAlloc(malloc); + pSQLInfo->validSql = true; + + int32_t i = 0; + while (1) { + SSQLToken t0 = {0}; + + if (pStr[i] == 0) { + Parse(pParser, 0, t0, pSQLInfo); + goto abort_parse; + } + + t0.n = tSQLGetToken((char *)&pStr[i], &t0.type); + t0.z = (char *)(pStr + i); + i += t0.n; + + switch (t0.type) { + case TK_SPACE: + case TK_COMMENT: { + break; + } + case TK_SEMI: { + Parse(pParser, 0, t0, pSQLInfo); + goto abort_parse; + } + case TK_ILLEGAL: { + sprintf(pSQLInfo->pzErrMsg, "unrecognized token: \"%s\"", t0.z); + pSQLInfo->validSql = false; + goto abort_parse; + } + default: + Parse(pParser, t0.type, t0, pSQLInfo); + if (pSQLInfo->validSql == false) { + goto abort_parse; + } + } + } + +abort_parse: + ParseFree(pParser, free); + return 0; +} + +tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SSQLToken *pToken) { + if (pList == NULL) { + pList = calloc(1, sizeof(tSQLExprList)); + } + + if (pList->nAlloc <= pList->nExpr) { // + pList->nAlloc = (pList->nAlloc << 1) + 4; + pList->a = realloc(pList->a, pList->nAlloc * sizeof(pList->a[0])); + if (pList->a == 0) { + pList->nExpr = pList->nAlloc = 0; + return pList; + } + } + assert(pList->a != 0); + + if (pNode || pToken) { + struct tSQLExprItem *pItem = &pList->a[pList->nExpr++]; + memset(pItem, 0, sizeof(*pItem)); + pItem->pNode = pNode; + if (pToken) { // set the as clause + pItem->aliasName = malloc(pToken->n + 1); + strncpy(pItem->aliasName, pToken->z, pToken->n); + pItem->aliasName[pToken->n] = 0; + + strdequote(pItem->aliasName); + } + } + return pList; +} + +void tSQLExprListDestroy(tSQLExprList *pList) { + if (pList == NULL) return; + + for (int32_t i = 0; i < pList->nExpr; ++i) { + if (pList->a[i].aliasName != NULL) { + free(pList->a[i].aliasName); + } + tSQLExprDestroy(pList->a[i].pNode); + } + + free(pList->a); + free(pList); +} + +tSQLExpr *tSQLExprIdValueCreate(SSQLToken *pToken, int32_t optrType) { + tSQLExpr *nodePtr = calloc(1, sizeof(tSQLExpr)); + + if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) { + toTSDBType(pToken->type); + + tVariantCreate(&nodePtr->val, pToken); + nodePtr->nSQLOptr = optrType; + } else if (optrType == TK_NOW) { + // default use microsecond + nodePtr->val.i64Key = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO); + nodePtr->val.nType = TSDB_DATA_TYPE_BIGINT; + nodePtr->nSQLOptr = TK_TIMESTAMP; + // TK_TIMESTAMP used to denote the time value is in microsecond + } else if (optrType == TK_VARIABLE) { + int32_t ret = getTimestampInUsFromStr(pToken->z, pToken->n, &nodePtr->val.i64Key); + UNUSED(ret); + + nodePtr->val.nType = TSDB_DATA_TYPE_BIGINT; + nodePtr->nSQLOptr = TK_TIMESTAMP; + } else { // must be field id if not numbers + if (pToken != NULL) { + assert(optrType == TK_ID); + /* it must be the column name (tk_id) */ + nodePtr->colInfo = *pToken; + } else { + assert(optrType == TK_ALL); + } + + nodePtr->nSQLOptr = optrType; + } + return nodePtr; +} + +/* + * pList is the parameters for function with id(optType) + * function name is denoted by pFunctionToken + */ +tSQLExpr *tSQLExprCreateFunction(tSQLExprList *pList, SSQLToken *pFuncToken, SSQLToken *endToken, int32_t optType) { + if (pFuncToken == NULL) return NULL; + + tSQLExpr *pExpr = calloc(1, 
sizeof(tSQLExpr)); + pExpr->nSQLOptr = optType; + pExpr->pParam = pList; + + int32_t len = (endToken->z + endToken->n) - pFuncToken->z; + pExpr->operand.z = pFuncToken->z; + + pExpr->operand.n = len; // raw field name + pExpr->operand.type = pFuncToken->type; + return pExpr; +} + +/* + * create binary expression in this procedure + * if the expr is arithmetic, calculate the result and set it to tSQLExpr Object + */ +tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { + tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr)); + + if (optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || + optrType == TK_REM) { + /* + * if a token is noted as the TK_TIMESTAMP, the time precision is microsecond + * Otherwise, the time precision is adaptive, determined by the time precision from databases. + */ + if ((pLeft->nSQLOptr == TK_INTEGER && pRight->nSQLOptr == TK_INTEGER) || + (pLeft->nSQLOptr == TK_TIMESTAMP && pRight->nSQLOptr == TK_TIMESTAMP)) { + pExpr->val.nType = TSDB_DATA_TYPE_BIGINT; + pExpr->nSQLOptr = pLeft->nSQLOptr; + + switch (optrType) { + case TK_PLUS: { + pExpr->val.i64Key = pLeft->val.i64Key + pRight->val.i64Key; + break; + } + case TK_MINUS: { + pExpr->val.i64Key = pLeft->val.i64Key - pRight->val.i64Key; + break; + } + case TK_STAR: { + pExpr->val.i64Key = pLeft->val.i64Key * pRight->val.i64Key; + break; + } + case TK_DIVIDE: { + pExpr->nSQLOptr = TK_FLOAT; + pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE; + pExpr->val.dKey = (double)pLeft->val.i64Key / pRight->val.i64Key; + break; + } + case TK_REM: { + pExpr->val.i64Key = pLeft->val.i64Key % pRight->val.i64Key; + break; + } + } + + tSQLExprDestroy(pLeft); + tSQLExprDestroy(pRight); + + } else if ((pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE && pRight->val.nType == TSDB_DATA_TYPE_BIGINT) || + (pRight->val.nType == TSDB_DATA_TYPE_DOUBLE && pLeft->val.nType == TSDB_DATA_TYPE_BIGINT)) { + pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE; + pExpr->nSQLOptr = TK_FLOAT; + + double left = pLeft->val.nType == TSDB_DATA_TYPE_DOUBLE ? pLeft->val.dKey : pLeft->val.i64Key; + double right = pRight->val.nType == TSDB_DATA_TYPE_DOUBLE ? pRight->val.dKey : pRight->val.i64Key; + + switch (optrType) { + case TK_PLUS: { + pExpr->val.dKey = left + right; + break; + } + case TK_MINUS: { + pExpr->val.dKey = left - right; + break; + } + case TK_STAR: { + pExpr->val.dKey = left * right; + break; + } + case TK_DIVIDE: { + pExpr->val.dKey = left / right; + break; + } + case TK_REM: { + pExpr->val.dKey = left - ((int64_t)(left / right)) * right; + break; + } + } + + tSQLExprDestroy(pLeft); + tSQLExprDestroy(pRight); + + } else { + pExpr->nSQLOptr = optrType; + pExpr->pLeft = pLeft; + pExpr->pRight = pRight; + } + } else if (optrType == TK_IN) { + pExpr->nSQLOptr = optrType; + pExpr->pLeft = pLeft; + + tSQLExpr *pRSub = calloc(1, sizeof(tSQLExpr)); + pRSub->nSQLOptr = TK_SET; // TODO refactor ..... 
+ pRSub->pParam = (tSQLExprList *)pRight; + + pExpr->pRight = pRSub; + } else { + pExpr->nSQLOptr = optrType; + pExpr->pLeft = pLeft; + pExpr->pRight = pRight; + } + + return pExpr; +} + +void tSQLExprDestroy(tSQLExpr *pExpr) { + if (pExpr == NULL) return; + + tSQLExprDestroy(pExpr->pLeft); + tSQLExprDestroy(pExpr->pRight); + + if (pExpr->nSQLOptr == TK_STRING) { + tVariantDestroy(&pExpr->val); + } + + tSQLExprListDestroy(pExpr->pParam); + + free(pExpr); +} + +static void *tVariantListExpand(tVariantList *pList) { + if (pList->nAlloc <= pList->nExpr) { // + int32_t newSize = (pList->nAlloc << 1) + 4; + + void *ptr = realloc(pList->a, newSize * sizeof(pList->a[0])); + if (ptr == 0) { + return NULL; + } + + pList->nAlloc = newSize; + pList->a = ptr; + } + + assert(pList->a != 0); + return pList; +} + +tVariantList *tVariantListAppend(tVariantList *pList, tVariant *pVar, uint8_t sortOrder) { + if (pList == NULL) { + pList = calloc(1, sizeof(tVariantList)); + } + + if (tVariantListExpand(pList) == NULL) { + return pList; + } + + if (pVar) { + tVariantListItem *pItem = &pList->a[pList->nExpr++]; + /* + * Here we do not employ the assign function, since we need the pz attribute of structure + * , which is the point to char string, to free it! + * + * Otherwise, the original pointer may be lost, which causes memory leak. + */ + memcpy(pItem, pVar, sizeof(tVariant)); + pItem->sortOrder = sortOrder; + } + return pList; +} + +tVariantList *tVariantListInsert(tVariantList *pList, tVariant *pVar, uint8_t sortOrder, int32_t index) { + if (pList == NULL || index >= pList->nExpr) { + return tVariantListAppend(NULL, pVar, sortOrder); + } + + if (tVariantListExpand(pList) == NULL) { + return pList; + } + + if (pVar) { + memmove(&pList->a[index + 1], &pList->a[index], sizeof(tVariantListItem) * (pList->nExpr - index)); + + tVariantListItem *pItem = &pList->a[index]; + /* + * Here we do not employ the assign function, since we need the pz attribute of structure + * , which is the point to char string, to free it! + * + * Otherwise, the original pointer may be lost, which causes memory leak. 
+ */ + memcpy(pItem, pVar, sizeof(tVariant)); + pItem->sortOrder = sortOrder; + + pList->nExpr++; + } + + return pList; +} + +void tVariantListDestroy(tVariantList *pList) { + if (pList == NULL) return; + + for (int32_t i = 0; i < pList->nExpr; ++i) { + tVariantDestroy(&pList->a[i].pVar); + } + + free(pList->a); + free(pList); +} + +tFieldList *tFieldListAppend(tFieldList *pList, TAOS_FIELD *pField) { + if (pList == NULL) pList = calloc(1, sizeof(tFieldList)); + + if (pList->nAlloc <= pList->nField) { // + pList->nAlloc = (pList->nAlloc << 1) + 4; + pList->p = realloc(pList->p, pList->nAlloc * sizeof(pList->p[0])); + if (pList->p == 0) { + pList->nField = pList->nAlloc = 0; + return pList; + } + } + assert(pList->p != 0); + + if (pField) { + struct TAOS_FIELD *pItem = (struct TAOS_FIELD *)&pList->p[pList->nField++]; + memcpy(pItem, pField, sizeof(TAOS_FIELD)); + } + return pList; +} + +void tFieldListDestroy(tFieldList *pList) { + if (pList == NULL) return; + + free(pList->p); + free(pList); +} + +void setDBName(SSQLToken *pCpxName, SSQLToken *pDB) { + pCpxName->type = pDB->type; + pCpxName->z = pDB->z; + pCpxName->n = pDB->n; +} + +void tSQLSetColumnInfo(TAOS_FIELD *pField, SSQLToken *pName, TAOS_FIELD *pType) { + int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]); + /* truncate the column name */ + if (pName->n >= maxLen) { + pName->n = maxLen - 1; + } + + strncpy(pField->name, pName->z, pName->n); + pField->name[pName->n] = 0; + + pField->type = pType->type; + pField->bytes = pType->bytes; +} + +void tSQLSetColumnType(TAOS_FIELD *pField, SSQLToken *type) { + pField->type = -1; + + for (int8_t i = 0; i < sizeof(tDataTypeDesc) / sizeof(tDataTypeDesc[0]); ++i) { + if ((strncasecmp(type->z, tDataTypeDesc[i].aName, tDataTypeDesc[i].nameLen) == 0) && + (type->n == tDataTypeDesc[i].nameLen)) { + pField->type = i; + pField->bytes = tDataTypeDesc[i].nSize; + + if (i == TSDB_DATA_TYPE_NCHAR) { + /* + * for nchar, the TOKENTYPE is the number of character, so the length is the + * number of bytes in UCS-4 format, which is 4 times larger than the + * number of characters + */ + pField->bytes = -(int32_t)type->type * TSDB_NCHAR_SIZE; + } else if (i == TSDB_DATA_TYPE_BINARY) { + /* for binary, the TOKENTYPE is the length of binary */ + pField->bytes = -(int32_t)type->type; + } + break; + } + } +} + +/* + * extract the select info out of sql string + */ +SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection, SSQLToken *pFrom, tSQLExpr *pWhere, + tVariantList *pGroupby, tVariantList *pSortOrder, SSQLToken *pInterval, + SSQLToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { + assert(pSelection != NULL && pFrom != NULL && pInterval != NULL && pLimit != NULL && pGLimit != NULL); + + SQuerySQL *pQuery = calloc(1, sizeof(SQuerySQL)); + pQuery->selectToken = *pSelectToken; + pQuery->selectToken.n = strlen(pQuery->selectToken.z); // all later sql string are belonged to the stream sql + + pQuery->pSelection = pSelection; + pQuery->from = *pFrom; + pQuery->pGroupby = pGroupby; + pQuery->pSortOrder = pSortOrder; + pQuery->pWhere = pWhere; + + pQuery->limit = *pLimit; + pQuery->glimit = *pGLimit; + + pQuery->interval = *pInterval; + pQuery->sliding = *pSliding; + pQuery->fillType = pFill; + + return pQuery; +} + +tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList) { + if (pList == NULL) pList = calloc(1, sizeof(tSQLExprListList)); + + if (pList->nAlloc <= pList->nList) { // + pList->nAlloc = (pList->nAlloc << 1) + 
4; + pList->a = realloc(pList->a, pList->nAlloc * sizeof(pList->a[0])); + if (pList->a == 0) { + pList->nList = pList->nAlloc = 0; + return pList; + } + } + assert(pList->a != 0); + + if (pExprList) { + pList->a[pList->nList++] = pExprList; + } + + return pList; +} + +void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList) { + SInsertSQL *pInsert = calloc(1, sizeof(SInsertSQL)); + + pInsert->name = *pName; + pInsert->pValue = pList; + + pInfo->pInsertInfo = pInsert; + pInfo->sqlType = TSQL_INSERT; +} + +void destroyQuerySql(SQuerySQL *pSql) { + if (pSql == NULL) return; + + tSQLExprListDestroy(pSql->pSelection); + pSql->pSelection = NULL; + + tSQLExprDestroy(pSql->pWhere); + pSql->pWhere = NULL; + + tVariantListDestroy(pSql->pSortOrder); + pSql->pSortOrder = NULL; + + tVariantListDestroy(pSql->pGroupby); + pSql->pGroupby = NULL; + + tVariantListDestroy(pSql->fillType); + + free(pSql); +} + +SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName, + tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type) { + SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL)); + + switch (type) { + case TSQL_CREATE_NORMAL_METER: { + pCreate->colInfo.pColumns = pCols; + assert(pTagVals == NULL && pTags == NULL); + break; + } + case TSQL_CREATE_NORMAL_METRIC: { + pCreate->colInfo.pColumns = pCols; + pCreate->colInfo.pTagColumns = pTags; + assert(pTagVals == NULL && pTags != NULL && pCols != NULL); + break; + } + case TSQL_CREATE_METER_FROM_METRIC: { + pCreate->usingInfo.pTagVals = pTagVals; + pCreate->usingInfo.metricName = *pMetricName; + break; + } + case TSQL_CREATE_STREAM: { + pCreate->pSelect = pSelect; + break; + } + default: + assert(false); + } + + return pCreate; +} + +SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type) { + SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL)); + pAlterTable->name = *pMeterName; + + if (type == ALTER_TABLE_ADD_COLUMN || type == ALTER_TABLE_TAGS_ADD) { + pAlterTable->pAddColumns = pCols; + assert(pVals == NULL); + } else { + /* ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP, + * ALTER_TABLE_DROP_COLUMN */ + pAlterTable->varList = pVals; + assert(pCols == NULL); + } + + return pAlterTable; +} + +void SQLInfoDestroy(SSqlInfo *pInfo) { + if (pInfo == NULL) return; + + if (pInfo->sqlType == TSQL_QUERY_METER) { + destroyQuerySql(pInfo->pQueryInfo); + } else if (pInfo->sqlType >= TSQL_CREATE_NORMAL_METER && pInfo->sqlType <= TSQL_CREATE_STREAM) { + SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; + destroyQuerySql(pCreateTableInfo->pSelect); + + tFieldListDestroy(pCreateTableInfo->colInfo.pColumns); + tFieldListDestroy(pCreateTableInfo->colInfo.pTagColumns); + + tVariantListDestroy(pCreateTableInfo->usingInfo.pTagVals); + tfree(pInfo->pCreateTableInfo); + } else if (pInfo->sqlType >= ALTER_TABLE_TAGS_ADD && pInfo->sqlType <= ALTER_TABLE_DROP_COLUMN) { + tVariantListDestroy(pInfo->pAlterInfo->varList); + tFieldListDestroy(pInfo->pAlterInfo->pAddColumns); + tfree(pInfo->pAlterInfo); + } else { + if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) { + free(pInfo->pDCLInfo->a); + } + + if (pInfo->sqlType == CREATE_DATABASE) { + tVariantListDestroy(pInfo->pDCLInfo->dbOpt.keep); + } + + tfree(pInfo->pDCLInfo); + } +} + +void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) { + pInfo->sqlType = type; + pInfo->pCreateTableInfo = pSqlExprInfo; + + if 
(pMeterName != NULL) { + pInfo->pCreateTableInfo->name = *pMeterName; + } +} + +void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists) { + pInfo->pCreateTableInfo->name = *pMeterName; + pInfo->pCreateTableInfo->existCheck = (pIfNotExists->n != 0); +} + +void tTokenListBuyMoreSpace(tDCLSQL *pTokenList) { + if (pTokenList->nAlloc <= pTokenList->nTokens) { // + pTokenList->nAlloc = (pTokenList->nAlloc << 1) + 4; + pTokenList->a = realloc(pTokenList->a, pTokenList->nAlloc * sizeof(pTokenList->a[0])); + if (pTokenList->a == 0) { + pTokenList->nTokens = pTokenList->nAlloc = 0; + } + } +} + +tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken) { + if (pToken == NULL) return NULL; + + if (pTokenList == NULL) pTokenList = calloc(1, sizeof(tDCLSQL)); + + tTokenListBuyMoreSpace(pTokenList); + pTokenList->a[pTokenList->nTokens++] = *pToken; + + return pTokenList; +} + +void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) { + pInfo->sqlType = type; + + if (nParam == 0) return; + if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + + va_list va; + va_start(va, nParam); + + while (nParam-- > 0) { + SSQLToken *pToken = va_arg(va, SSQLToken *); + tTokenListAppend(pInfo->pDCLInfo, pToken); + } + va_end(va); +} + +void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBSQL *pDB, SSQLToken *pIgExists) { + pInfo->sqlType = type; + if (pInfo->pDCLInfo == NULL) { + pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + } + + pInfo->pDCLInfo->dbOpt = *pDB; + pInfo->pDCLInfo->dbOpt.dbname = *pToken; + + tTokenListAppend(pInfo->pDCLInfo, pIgExists); +} + +void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo) { + pInfo->sqlType = type; + if (pInfo->pDCLInfo == NULL) { + pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + } + + pInfo->pDCLInfo->acctOpt = *pAcctInfo; + + tTokenListAppend(pInfo->pDCLInfo, pName); + + if (pPwd->n > 0) { + tTokenListAppend(pInfo->pDCLInfo, pPwd); + } +} diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c new file mode 100644 index 000000000000..c67deba51540 --- /dev/null +++ b/src/client/src/tscSchemaUtil.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tschemautil.h" +#include "ttypes.h" +#include "tutil.h" + +bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols) { + if (!VALIDNUMOFCOLS(numOfCols)) { + return false; + } + + /* first column must be the timestamp, which is a primary key */ + if (pSchema[0].type != TSDB_DATA_TYPE_TIMESTAMP) { + return false; + } + + /* type is valid, length is valid */ + int32_t rowLen = 0; + + for (int32_t i = 0; i < numOfCols; ++i) { + // 1. valid types + if (pSchema[i].type > TSDB_DATA_TYPE_TIMESTAMP || pSchema[i].type < TSDB_DATA_TYPE_BOOL) { + return false; + } + + // 2. 
valid length for each type + if (pSchema[i].type == TSDB_DATA_TYPE_TIMESTAMP) { + if (pSchema[i].bytes > TSDB_MAX_BINARY_LEN) { + return false; + } + } else { + if (pSchema[i].bytes != tDataTypeDesc[pSchema[i].type].nSize) { + return false; + } + } + + // 3. valid column names + for (int32_t j = i + 1; j < numOfCols; ++j) { + if (strncasecmp(pSchema[i].name, pSchema[j].name, TSDB_COL_NAME_LEN) == 0) { + return false; + } + } + + rowLen += pSchema[i].bytes; + } + + // valid total length + return (rowLen <= TSDB_MAX_BYTES_PER_ROW); +} + +struct SSchema* tsGetSchema(SMeterMeta* pMeta) { + if (pMeta == NULL) { + return NULL; + } + return tsGetSchemaColIdx(pMeta, 0); +} + +struct SSchema* tsGetTagSchema(SMeterMeta* pMeta) { + if (pMeta == NULL || pMeta->numOfTags == 0) { + return NULL; + } + + return tsGetSchemaColIdx(pMeta, pMeta->numOfColumns); +} + +struct SSchema* tsGetSchemaColIdx(SMeterMeta* pMeta, int32_t startCol) { + if (pMeta->pSchema == 0) { + pMeta->pSchema = sizeof(SMeterMeta); + } + + return (SSchema*)(((char*)pMeta + pMeta->pSchema) + startCol * sizeof(SSchema)); +} + +char* tsGetTagsValue(SMeterMeta* pMeta) { + if (pMeta->tags == 0) { + int32_t numOfTotalCols = pMeta->numOfColumns + pMeta->numOfTags; + pMeta->tags = sizeof(SMeterMeta) + numOfTotalCols * sizeof(SSchema); + } + + return ((char*)pMeta + pMeta->tags); +} + +bool tsMeterMetaIdentical(SMeterMeta* p1, SMeterMeta* p2) { + if (p1 == NULL || p2 == NULL || p1->uid != p2->uid || p1->sversion != p2->sversion) { + return false; + } + + if (p1 == p2) { + return true; + } + + size_t size = sizeof(SMeterMeta) + p1->numOfColumns * sizeof(SSchema); + + for (int32_t i = 0; i < p1->numOfTags; ++i) { + SSchema* pColSchema = tsGetSchemaColIdx(p1, i + p1->numOfColumns); + size += pColSchema->bytes; + } + + return memcmp(p1, p2, size) == 0; +} + +static FORCE_INLINE char* skipSegments(char* input, char delimiter, int32_t num) { + for (int32_t i = 0; i < num; ++i) { + while (*input != 0 && *input++ != delimiter) { + }; + } + return input; +} + +static FORCE_INLINE void copySegment(char* dst, char* src, char delimiter) { + while (*src != delimiter && *src != 0) { + *dst++ = *src++; + } +} + +/** + * extract meter name from meterid, which the format of userid.dbname.metername + * @param meterId + * @return + */ +void extractMeterName(char* meterId, char* name) { + char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 2); + copySegment(name, r, TS_PATH_DELIMITER[0]); +} + +void extractDBName(char* meterId, char* name) { + char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 1); + copySegment(name, r, TS_PATH_DELIMITER[0]); +} diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c new file mode 100644 index 000000000000..b912264c4edc --- /dev/null +++ b/src/client/src/tscSecondaryMerge.c @@ -0,0 +1,1388 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include "tlosertree.h" +#include "tsclient.h" +#include "tscSecondaryMerge.h" +#include "tscUtil.h" +#include "tutil.h" + +typedef struct SCompareParam { + SLocalDataSrc ** pLocalData; + tOrderDescriptor *pDesc; + int32_t numOfElems; + int32_t groupOrderType; +} SCompareParam; + +int32_t treeComparator(const void *pLeft, const void *pRight, void *param) { + int32_t pLeftIdx = *(int32_t *)pLeft; + int32_t pRightIdx = *(int32_t *)pRight; + + SCompareParam * pParam = (SCompareParam *)param; + tOrderDescriptor *pDesc = pParam->pDesc; + SLocalDataSrc ** pLocalData = pParam->pLocalData; + + /* this input is exhausted, set the special value to denote this */ + if (pLocalData[pLeftIdx]->rowIdx == -1) { + return 1; + } + + if (pLocalData[pRightIdx]->rowIdx == -1) { + return -1; + } + + if (pParam->groupOrderType == TSQL_SO_DESC) { // desc + return compare_d(pDesc, pParam->numOfElems, pLocalData[pLeftIdx]->rowIdx, pLocalData[pLeftIdx]->filePage.data, + pParam->numOfElems, pLocalData[pRightIdx]->rowIdx, pLocalData[pRightIdx]->filePage.data); + } else { + return compare_a(pDesc, pParam->numOfElems, pLocalData[pLeftIdx]->rowIdx, pLocalData[pLeftIdx]->filePage.data, + pParam->numOfElems, pLocalData[pRightIdx]->rowIdx, pLocalData[pRightIdx]->filePage.data); + } +} + +static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pReducer, tOrderDescriptor *pDesc) { + /* + * the fields and offset attributes in pCmd and pModel may be different due to + * merge requirement. So, the final result in STscRes structure is formatted in accordance with the pCmd object. + */ + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SQLFunctionCtx *pCtx = &pReducer->pCtx[i]; + + pCtx->aOutputBuf = pReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pReducer->resColModel->maxCapacity; + + pCtx->order = pCmd->order.order; + pCtx->aInputElemBuf = + pReducer->pTempBuffer->data + pDesc->pSchema->colOffset[i]; // input buffer hold only one point data + + // input data format comes from pModel + pCtx->inputType = pDesc->pSchema->pFields[i].type; + pCtx->inputBytes = pDesc->pSchema->pFields[i].bytes; + + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + // output data format yet comes from pCmd. 
+ pCtx->outputBytes = pField->bytes; + pCtx->outputType = pField->type; + pCtx->numOfOutputElems = 0; + + pCtx->numOfIteratedElems = 0; + pCtx->startOffset = 0; + pCtx->size = 1; + pCtx->hasNullValue = true; + pCtx->currentStage = SECONDARY_STAGE_MERGE; + + pRes->bytes[i] = pField->bytes; + + int32_t sqlFunction = tscSqlExprGet(pCmd, i)->sqlFuncId; + if (sqlFunction == TSDB_FUNC_TOP_DST || sqlFunction == TSDB_FUNC_BOTTOM_DST) { + /* for top_dst/bottom_dst function, the output of timestamp is the first column */ + pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf; + + pCtx->param[2].i64Key = pCmd->order.order; + pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; + pCtx->param[3].i64Key = sqlFunction; + pCtx->param[3].nType = TSDB_DATA_TYPE_BIGINT; + + pCtx->param[1].i64Key = pCmd->order.orderColId; + } + } +} + +/* + * todo error process with async process + */ +void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, + tColModel *finalmodel, SSqlCmd *pCmd, SSqlRes *pRes) { + // offset of cmd in SSqlObj structure + char *pSqlObjAddr = (char *)pCmd - offsetof(SSqlObj, cmd); + + if (pMemBuffer == NULL || pDesc->pSchema == NULL) { + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + + tscError("%p no local buffer or intermediate result format model", pSqlObjAddr); + pRes->code = TSDB_CODE_APP_ERROR; + return; + } + + int32_t numOfFlush = 0; + for (int32_t i = 0; i < numOfBuffer; ++i) { + int32_t len = pMemBuffer[i]->fileMeta.flushoutData.nLength; + if (len == 0) { + tscTrace("%p no data retrieved from orderOfVnode:%d", pSqlObjAddr, i + 1); + continue; + } + + numOfFlush += len; + } + + if (numOfFlush == 0 || numOfBuffer == 0) { + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + + tscTrace("%p retrieved no data", pSqlObjAddr); + return; + } + + if (pDesc->pSchema->maxCapacity >= pMemBuffer[0]->nPageSize) { + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + + tscError("%p Invalid value of buffer capacity %d and page size %d ", pSqlObjAddr, pDesc->pSchema->maxCapacity, + pMemBuffer[0]->nPageSize); + pRes->code = TSDB_CODE_APP_ERROR; + return; + } + + size_t nReducerSize = sizeof(SLocalReducer) + POINTER_BYTES * numOfFlush; + SLocalReducer *pReducer = (SLocalReducer *)calloc(1, nReducerSize); + if (pReducer == NULL) { + tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer); + + tscError("%p failed to create merge structure", pSqlObjAddr); + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return; + } + + pReducer->pExtMemBuffer = pMemBuffer; + pReducer->pLocalDataSrc = (SLocalDataSrc **)&pReducer[1]; + assert(pReducer->pLocalDataSrc != NULL); + + pReducer->numOfBuffer = numOfFlush; + pReducer->numOfVnode = numOfBuffer; + + pReducer->pDesc = pDesc; + pTrace("%p the number of merged leaves is: %d", pSqlObjAddr, pReducer->numOfBuffer); + + int32_t idx = 0; + for (int32_t i = 0; i < numOfBuffer; ++i) { + int32_t numOfFlushoutInFile = pMemBuffer[i]->fileMeta.flushoutData.nLength; + + for (int32_t j = 0; j < numOfFlushoutInFile; ++j) { + SLocalDataSrc *pDS = (SLocalDataSrc *)malloc(sizeof(SLocalDataSrc) + pMemBuffer[0]->nPageSize); + if (pDS == NULL) { + tscError("%p failed to create merge structure", pSqlObjAddr); + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return; + } + pReducer->pLocalDataSrc[idx] = pDS; + + pDS->pMemBuffer = pMemBuffer[i]; + pDS->flushoutIdx = j; + pDS->filePage.numOfElems = 0; + pDS->pageId = 0; + pDS->rowIdx = 0; + + tscTrace("%p load data from disk into 
memory, orderOfVnode:%d, total:%d", pSqlObjAddr, i + 1, idx + 1); + tExtMemBufferLoadData(pMemBuffer[i], &(pDS->filePage), j, 0); +#ifdef _DEBUG_VIEW + printf("load data page into mem for build loser tree: %ld rows\n", pDS->filePage.numOfElems); + SSrcColumnInfo colInfo[256] = {0}; + tscGetSrcColumnInfo(colInfo, pCmd); + + tColModelDisplayEx(pDesc->pSchema, pDS->filePage.data, pDS->filePage.numOfElems, pMemBuffer[0]->numOfElemsPerPage, + colInfo); +#endif + if (pDS->filePage.numOfElems == 0) { // no data in this flush + tscTrace("%p flush data is empty, ignore %d flush record", pSqlObjAddr, idx); + tfree(pDS); + continue; + } + idx += 1; + } + } + assert(idx >= pReducer->numOfBuffer); + if (idx == 0) { + return; + } + + pReducer->numOfBuffer = idx; + + SCompareParam *param = malloc(sizeof(SCompareParam)); + param->pLocalData = pReducer->pLocalDataSrc; + param->pDesc = pReducer->pDesc; + param->numOfElems = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage; + param->groupOrderType = pCmd->groupbyExpr.orderType; + + pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator); + if (pReducer->pLoserTree == NULL || pRes->code != 0) { + return; + } + + // the input data format follows the old format, but output in a new format. + // so, all the input must be parsed as old format + pReducer->pCtx = (SQLFunctionCtx *)calloc(pCmd->fieldsInfo.numOfOutputCols, sizeof(SQLFunctionCtx)); + + pReducer->rowSize = pMemBuffer[0]->nElemSize; + + tscRestoreSQLFunctionForMetricQuery(pCmd); + tscFieldInfoCalOffset(pCmd); + + if (pReducer->rowSize > pMemBuffer[0]->nPageSize) { + assert(false); // todo fixed row size is larger than the minimum page size; + } + + pReducer->hasPrevRow = false; + pReducer->hasUnprocessedRow = false; + + pReducer->prevRowOfInput = (char *)calloc(1, pReducer->rowSize); + if (pReducer->prevRowOfInput == 0) { + // todo release previously allocated memory + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return; + } + + /* used to keep the latest input row */ + pReducer->pTempBuffer = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage)); + + pReducer->discardData = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage)); + pReducer->discard = false; + + pReducer->nResultBufSize = pMemBuffer[0]->nPageSize * 16; + pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage)); + + int32_t finalRowLength = tscGetResRowLength(pCmd); + pReducer->resColModel = finalmodel; + pReducer->resColModel->maxCapacity = pReducer->nResultBufSize / finalRowLength; + assert(finalRowLength <= pReducer->rowSize); + + pReducer->pBufForInterpo = calloc(1, pReducer->nResultBufSize); + + if (pReducer->pTempBuffer == NULL || pReducer->pResultBuf == NULL || pReducer->pBufForInterpo == NULL) { + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return; + } + + pReducer->pTempBuffer->numOfElems = 0; + + tscCreateResPointerInfo(pCmd, pRes); + tscInitSqlContext(pCmd, pRes, pReducer, pDesc); + + /* we change the maxCapacity of schema to denote that there is only one row in temp buffer */ + pReducer->pDesc->pSchema->maxCapacity = 1; + pReducer->offset = pCmd->limit.offset; + + pRes->pLocalReducer = pReducer; + pRes->numOfGroups = 0; + + int64_t stime = (pCmd->stime < pCmd->etime) ? 
pCmd->stime : pCmd->etime; + int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit); + + SInterpolationInfo *pInterpoInfo = &pReducer->interpolationInfo; + taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupbyCols, + pReducer->rowSize); + + int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupbyCols; + + if (pCmd->groupbyExpr.numOfGroupbyCols > 0) { + pInterpoInfo->pTags[0] = (char *)pInterpoInfo->pTags + POINTER_BYTES * pCmd->groupbyExpr.numOfGroupbyCols; + for (int32_t i = 1; i < pCmd->groupbyExpr.numOfGroupbyCols; ++i) { + pInterpoInfo->pTags[i] = pReducer->resColModel->pFields[startIndex + i - 1].bytes + pInterpoInfo->pTags[i - 1]; + } + } else { + assert(pInterpoInfo->pTags == NULL); + } +} + +static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, + int32_t orderType) { + if (pPage->numOfElems == 0) { + return 0; + } + + assert(pPage->numOfElems <= pDesc->pSchema->maxCapacity); + + // sort before flush to disk, the data must be consecutively put on tFilePage. + if (pDesc->orderIdx.numOfOrderedCols > 0) { + tColDataQSort(pDesc, pPage->numOfElems, 0, pPage->numOfElems - 1, pPage->data, orderType); + } + +#ifdef _DEBUG_VIEW + printf("%ld rows data flushed to disk after been sorted:\n", pPage->numOfElems); + tColModelDisplay(pDesc->pSchema, pPage->data, pPage->numOfElems, pPage->numOfElems); +#endif + + // write to cache after being sorted + if (tExtMemBufferPut(pMemoryBuf, pPage->data, pPage->numOfElems) < 0) { + tscError("failed to save data in temporary buffer"); + return -1; + } + + pPage->numOfElems = 0; + return 0; +} + +int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, int32_t orderType) { + int32_t ret = tscFlushTmpBufferImpl(pMemoryBuf, pDesc, pPage, orderType); + if (ret != 0) { + return -1; + } + + if (!tExtMemBufferFlush(pMemoryBuf)) { + return -1; + } + + return 0; +} + +int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data, + int32_t numOfRows, int32_t orderType) { + if (pPage->numOfElems + numOfRows <= pDesc->pSchema->maxCapacity) { + tColModelAppend(pDesc->pSchema, pPage, data, 0, numOfRows, numOfRows); + return 0; + } + + tColModel *pModel = pDesc->pSchema; + + int32_t numOfRemainEntries = pDesc->pSchema->maxCapacity - pPage->numOfElems; + tColModelAppend(pModel, pPage, data, 0, numOfRemainEntries, numOfRows); + + /* current buffer is full, need to flushed to disk */ + assert(pPage->numOfElems == pDesc->pSchema->maxCapacity); + int32_t ret = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType); + if (ret != 0) { + return -1; + } + + int32_t remain = numOfRows - numOfRemainEntries; + + while (remain > 0) { + int32_t numOfWriteElems = 0; + if (remain > pModel->maxCapacity) { + numOfWriteElems = pModel->maxCapacity; + } else { + numOfWriteElems = remain; + } + + tColModelAppend(pModel, pPage, data, numOfRows - remain, numOfWriteElems, numOfRows); + + if (pPage->numOfElems == pModel->maxCapacity) { + int32_t ret = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType); + if (ret != 0) { + return -1; + } + } else { + pPage->numOfElems = numOfWriteElems; + } + + remain -= numOfWriteElems; + numOfRemainEntries += numOfWriteElems; + } + + return 0; +} + +void tscDestroyLocalReducer(SSqlObj *pSql) { + if (pSql == NULL) { + return; + } + + tscTrace("%p start to free local reducer", pSql); + SSqlRes *pRes = 
&(pSql->res); + if (pRes->pLocalReducer == NULL) { + tscTrace("%p local reducer has been freed, abort", pSql); + return; + } + + // there is no more result, so we release all allocated resource + SLocalReducer *pLocalReducer = + (SLocalReducer *)__sync_val_compare_and_swap_64(&pRes->pLocalReducer, pRes->pLocalReducer, 0); + if (pLocalReducer != NULL) { + int32_t status = 0; + while ((status = __sync_val_compare_and_swap_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, + TSC_LOCALREDUCE_TOBE_FREED)) == TSC_LOCALREDUCE_IN_PROGRESS) { + taosMsleep(100); + tscTrace("%p waiting for delete procedure, status: %d", pSql, status); + } + + tfree(pLocalReducer->interpolationInfo.prevValues); + tfree(pLocalReducer->interpolationInfo.pTags); + + tfree(pLocalReducer->pCtx); + tfree(pLocalReducer->prevRowOfInput); + + tfree(pLocalReducer->pTempBuffer); + tfree(pLocalReducer->pResultBuf); + + if (pLocalReducer->pLoserTree) { + tfree(pLocalReducer->pLoserTree->param); + tfree(pLocalReducer->pLoserTree); + } + + tfree(pLocalReducer->pBufForInterpo); + + tfree(pLocalReducer->pFinalRes); + tfree(pLocalReducer->discardData); + + tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, + pLocalReducer->numOfVnode); + for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) { + tfree(pLocalReducer->pLocalDataSrc[i]); + } + + pLocalReducer->numOfBuffer = 0; + pLocalReducer->numOfCompleted = 0; + free(pLocalReducer); + } else { + tscTrace("%p already freed or another free function is invoked", pSql); + } + + tscTrace("%p free local reducer finished", pSql); +} + +static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCmd, tColModel *pModel) { + int32_t numOfGroupByCols = 0; + if (pCmd->groupbyExpr.numOfGroupbyCols > 0) { + numOfGroupByCols = pCmd->groupbyExpr.numOfGroupbyCols; + } + + // primary timestamp column is involved in final result + if (pCmd->nAggTimeInterval != 0) { + numOfGroupByCols++; + } + + int32_t *orderIdx = (int32_t *)calloc(numOfGroupByCols, sizeof(int32_t)); + if (orderIdx == NULL) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } + + if (numOfGroupByCols > 0) { + int32_t startCols = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupbyCols; + + // tags value locate at the last columns + for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupbyCols; ++i) { + orderIdx[i] = startCols++; + } + + if (pCmd->nAggTimeInterval != 0) { + /* + * the first column is the timestamp, handles queries like "interval(10m) group by tags" + */ + orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } + } + + *pOrderDesc = tOrderDesCreate(orderIdx, numOfGroupByCols, pModel, pCmd->order.order); + tfree(orderIdx); + + if (*pOrderDesc == NULL) { + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } else { + return TSDB_CODE_SUCCESS; + } +} + +bool isSameGroupOfPrev(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpPage) { + int16_t functionId = tscSqlExprGet(pCmd, 0)->sqlFuncId; + if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { // column projection query + return false; // disable merge procedure + } + + tOrderDescriptor *pOrderDesc = pReducer->pDesc; + int32_t numOfCols = pOrderDesc->orderIdx.numOfOrderedCols; + + if (numOfCols > 0) { + if (pOrderDesc->orderIdx.pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) { //<= 0 + /* metric interval query */ + assert(pCmd->nAggTimeInterval > 0); + pOrderDesc->orderIdx.numOfOrderedCols -= 1; + } else { /* simple group by query */ + 
assert(pCmd->nAggTimeInterval == 0); + } + } else { + return true; + } + + // only one row exists + int32_t ret = compare_a(pOrderDesc, 1, 0, pPrev, 1, 0, tmpPage->data); + pOrderDesc->orderIdx.numOfOrderedCols = numOfCols; + + return (ret == 0); +} + +int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc, + tColModel **pFinalModel, uint32_t nBufferSizes) { + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + SSchema * pSchema = NULL; + tColModel *pModel = NULL; + *pFinalModel = NULL; + + (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pCmd->pMetricMeta->numOfVnodes); + if (*pMemBuffer == NULL) { + tscError("%p failed to allocate memory", pSql); + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return pRes->code; + } + + pSchema = (SSchema *)calloc(1, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols); + if (pSchema == NULL) { + tscError("%p failed to allocate memory", pSql); + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return pRes->code; + } + + int32_t rlen = 0; + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + + pSchema[i].bytes = pExpr->resBytes; + pSchema[i].type = pExpr->resType; + + rlen += pExpr->resBytes; + } + + int32_t capacity = nBufferSizes / rlen; + pModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity); + + for (int32_t i = 0; i < pCmd->pMetricMeta->numOfVnodes; ++i) { + char tmpPath[512] = {0}; + getExtTmpfilePath("/tv_bf_db_%lld_%lld_%d.d", taosGetPthreadId(), i, 0, tmpPath); + tscTrace("%p create tmp file:%s", pSql, tmpPath); + + tExtMemBufferCreate(&(*pMemBuffer)[i], nBufferSizes, rlen, tmpPath, pModel); + (*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL; + } + + if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) { + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return pRes->code; + } + + memset(pSchema, 0, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols); + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + pSchema[i].type = pField->type; + pSchema[i].bytes = pField->bytes; + strcpy(pSchema[i].name, pField->name); + } + + *pFinalModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity); + tfree(pSchema); + + return TSDB_CODE_SUCCESS; +} + +/** + * @param pMemBuffer + * @param pDesc + * @param pFinalModel + * @param numOfVnodes + */ +void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, tColModel *pFinalModel, + int32_t numOfVnodes) { + tColModelDestroy(pFinalModel); + tOrderDescDestroy(pDesc); + for (int32_t i = 0; i < numOfVnodes; ++i) { + tExtMemBufferDestroy(&pMemBuffer[i]); + } + + tfree(pMemBuffer); +} + +/** + * + * @param pLocalReducer + * @param pOneInterDataSrc + * @param treeList + * @return the number of remain input source. 
if ret == 0, all data has been handled + */ +int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSrc *pOneInterDataSrc, + bool *needAdjustLoserTree) { + pOneInterDataSrc->rowIdx = 0; + pOneInterDataSrc->pageId += 1; + + if (pOneInterDataSrc->pageId < + pOneInterDataSrc->pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[pOneInterDataSrc->flushoutIdx].numOfPages) { + tExtMemBufferLoadData(pOneInterDataSrc->pMemBuffer, &(pOneInterDataSrc->filePage), pOneInterDataSrc->flushoutIdx, + pOneInterDataSrc->pageId); + +#if defined(_DEBUG_VIEW) + printf("new page load to buffer\n"); + tColModelDisplay(pOneInterDataSrc->pMemBuffer->pColModel, pOneInterDataSrc->filePage.data, + pOneInterDataSrc->filePage.numOfElems, pOneInterDataSrc->pMemBuffer->pColModel->maxCapacity); +#endif + *needAdjustLoserTree = true; + } else { + pLocalReducer->numOfCompleted += 1; + + pOneInterDataSrc->rowIdx = -1; + pOneInterDataSrc->pageId = -1; + *needAdjustLoserTree = true; + } + + return pLocalReducer->numOfBuffer; +} + +void loadDataIntoMemAndAdjustLoserTree(SLocalReducer *pLocalReducer, SLocalDataSrc *pOneInterDataSrc, + SLoserTreeInfo *pTree) { + /* + * load a new data page into memory for intermediate dataset source, + * since it's last record in buffer has been chosen to be processed, as the winner of loser-tree + */ + bool needToAdjust = true; + if (pOneInterDataSrc->filePage.numOfElems <= pOneInterDataSrc->rowIdx) { + loadNewDataFromDiskFor(pLocalReducer, pOneInterDataSrc, &needToAdjust); + } + + /* + * adjust loser tree otherwise, according to new candidate data + * if the loser tree is rebuild completed, we do not need to adjust + */ + if (needToAdjust) { + int32_t leafNodeIdx = pTree->pNode[0].index + pLocalReducer->numOfBuffer; + +#ifdef _DEBUG_VIEW + printf("before adjust:\t"); + tLoserTreeDisplay(pTree); +#endif + + tLoserTreeAdjust(pTree, leafNodeIdx); + +#ifdef _DEBUG_VIEW + printf("\nafter adjust:\t"); + tLoserTreeDisplay(pTree); + printf("\n"); +#endif + } +} + +void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SSqlCmd *pCmd, + SInterpolationInfo *pInterpoInfo) { // discard following dataset in the + // same group and reset the + // interpolation information + int64_t stime = (pCmd->stime < pCmd->etime) ? 
pCmd->stime : pCmd->etime; + int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit); + + taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupbyCols, + pLocalReducer->rowSize); + + pLocalReducer->discard = true; + pLocalReducer->discardData->numOfElems = 0; + + tColModel *pModel = pLocalReducer->pDesc->pSchema; + tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1); +} + +// todo merge with following function +static void reversedCopyResultToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage *pFinalDataPage) { + for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + int32_t offset = tscFieldInfoGetOffset(pCmd, i); + char * src = pFinalDataPage->data + (pRes->numOfRows - 1) * pField->bytes + pRes->numOfRows * offset; + char * dst = pRes->data + pRes->numOfRows * offset; + + for (int32_t j = 0; j < pRes->numOfRows; ++j) { + memcpy(dst, src, (size_t)pField->bytes); + dst += pField->bytes; + src -= pField->bytes; + } + } +} + +static void reversedCopyFromInterpolationToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage **pResPages, + SLocalReducer *pLocalReducer) { + for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + int32_t offset = tscFieldInfoGetOffset(pCmd, i); + assert(offset == pLocalReducer->resColModel->colOffset[i]); + + char *src = pResPages[i]->data + (pRes->numOfRows - 1) * pField->bytes; + char *dst = pRes->data + pRes->numOfRows * offset; + + for (int32_t j = 0; j < pRes->numOfRows; ++j) { + memcpy(dst, src, (size_t)pField->bytes); + dst += pField->bytes; + src -= pField->bytes; + } + } +} + +/* + * Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called + * by "interuptHandler" function in shell + */ +static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) { + SSqlCmd * pCmd = &pSql->cmd; + SSqlRes * pRes = &pSql->res; + tFilePage *pFinalDataPage = pLocalReducer->pResultBuf; + + if (pRes->pLocalReducer != pLocalReducer) { + /* + * Release the SSqlObj is called, and it is int destroying function invoked by other thread. + * However, the other thread will WAIT until current process fully completes. 
+ * Since the flag of release struct is set by doLocalReduce function + */ + assert(pRes->pLocalReducer == NULL); + } + + if (pLocalReducer->pFinalRes == NULL) { + pLocalReducer->pFinalRes = malloc(pLocalReducer->rowSize * pLocalReducer->resColModel->maxCapacity); + } + + if (pCmd->nAggTimeInterval == 0 || pCmd->interpoType == TSDB_INTERPO_NONE) { + // no interval query, no interpolation + pRes->data = pLocalReducer->pFinalRes; + pRes->numOfRows = pFinalDataPage->numOfElems; + pRes->numOfTotal += pRes->numOfRows; + + if (pCmd->limit.offset > 0) { + if (pCmd->limit.offset < pRes->numOfRows) { + int32_t prevSize = pFinalDataPage->numOfElems; + tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, pCmd->limit.offset - 1); + + /* remove the hole in column model */ + tColModelCompress(pLocalReducer->resColModel, pFinalDataPage, prevSize); + + pRes->numOfRows -= pCmd->limit.offset; + pRes->numOfTotal -= pCmd->limit.offset; + pCmd->limit.offset = 0; + } else { + pCmd->limit.offset -= pRes->numOfRows; + pRes->numOfRows = 0; + pRes->numOfTotal = 0; + } + } + + if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) { + /* impose the limitation of output rows on the final result */ + int32_t prevSize = pFinalDataPage->numOfElems; + int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit; + + assert(overFlow < pRes->numOfRows); + + pRes->numOfTotal = pCmd->limit.limit; + pRes->numOfRows -= overFlow; + pFinalDataPage->numOfElems -= overFlow; + + tColModelCompress(pLocalReducer->resColModel, pFinalDataPage, prevSize); + + /* set remain data to be discarded, and reset the interpolation information */ + savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, &pLocalReducer->interpolationInfo); + } + + int32_t rowSize = tscGetResRowLength(pCmd); + // handle the descend order output + if (pCmd->order.order == TSQL_SO_ASC) { + memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * rowSize); + } else { + reversedCopyResultToDstBuf(pCmd, pRes, pFinalDataPage); + } + + pFinalDataPage->numOfElems = 0; + return; + } + + int64_t * pPrimaryKeys = (int64_t *)pLocalReducer->pBufForInterpo; + SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; + + int64_t actualETime = (pCmd->stime < pCmd->etime) ? 
pCmd->etime : pCmd->stime; + + tFilePage **pResPages = malloc(POINTER_BYTES * pCmd->fieldsInfo.numOfOutputCols); + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->maxCapacity); + } + + char ** srcData = (char **)malloc((POINTER_BYTES + sizeof(int32_t)) * pCmd->fieldsInfo.numOfOutputCols); + int32_t *functions = (int32_t *)((char *)srcData + pCmd->fieldsInfo.numOfOutputCols * sizeof(void *)); + + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + srcData[i] = pLocalReducer->pBufForInterpo + tscFieldInfoGetOffset(pCmd, i) * pInterpoInfo->numOfRawDataInRows; + functions[i] = tscSqlExprGet(pCmd, i)->sqlFuncId; + } + + while (1) { + int32_t remains = taosNumOfRemainPoints(pInterpoInfo); + TSKEY etime = taosGetRevisedEndKey(actualETime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit); + int32_t nrows = taosGetNumOfResultWithInterpo(pInterpoInfo, pPrimaryKeys, remains, pCmd->nAggTimeInterval, etime, + pLocalReducer->resColModel->maxCapacity); + + int32_t newRows = taosDoInterpoResult(pInterpoInfo, pCmd->interpoType, pResPages, remains, nrows, + pCmd->nAggTimeInterval, pPrimaryKeys, pLocalReducer->resColModel, srcData, + pCmd->defaultVal, functions, pLocalReducer->resColModel->maxCapacity); + assert(newRows <= nrows); + + if (pCmd->limit.offset < newRows) { + newRows -= pCmd->limit.offset; + + if (pCmd->limit.offset > 0) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pCmd->limit.offset, newRows * pField->bytes); + } + } + + pRes->data = pLocalReducer->pFinalRes; + pRes->numOfRows = newRows; + pRes->numOfTotal += newRows; + + pCmd->limit.offset = 0; + break; + } else { + pCmd->limit.offset -= newRows; + pRes->numOfRows = 0; + + int32_t rpoints = taosNumOfRemainPoints(pInterpoInfo); + if (rpoints <= 0) { + if (!doneOutput) { + /* reduce procedure is not completed, but current results for interpolation are exhausted */ + break; + } + + /* all output for current group are completed */ + int32_t totalRemainRows = + taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeys, rpoints, pCmd->nAggTimeInterval, actualETime); + if (totalRemainRows <= 0) { + break; + } + } + } + } + + if (pRes->numOfRows > 0) { + if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) { + int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit; + pRes->numOfRows -= overFlow; + + assert(pRes->numOfRows >= 0); + + pRes->numOfTotal = pCmd->limit.limit; + pFinalDataPage->numOfElems -= overFlow; + + /* set remain data to be discarded, and reset the interpolation information */ + savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, pInterpoInfo); + } + + if (pCmd->order.order == TSQL_SO_ASC) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + memcpy(pRes->data + pLocalReducer->resColModel->colOffset[i] * pRes->numOfRows, pResPages[i]->data, + pField->bytes * pRes->numOfRows); + } + } else { + reversedCopyFromInterpolationToDstBuf(pCmd, pRes, pResPages, pLocalReducer); + } + } + + pFinalDataPage->numOfElems = 0; + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + tfree(pResPages[i]); + } + tfree(pResPages); + + free(srcData); +} + +static void savePrevRecord(SLocalReducer *pLocalReducer, tFilePage 
*tmpPages) { + tColModel *pColModel = pLocalReducer->pDesc->pSchema; + assert(pColModel->maxCapacity == 1 && tmpPages->numOfElems == 1); + + // copy to previous temp buffer + for (int32_t i = 0; i < pLocalReducer->pDesc->pSchema->numOfCols; ++i) { + memcpy(pLocalReducer->prevRowOfInput + pColModel->colOffset[i], tmpPages->data + pColModel->colOffset[i], + pColModel->pFields[i].bytes); + } + + tmpPages->numOfElems = 0; + pLocalReducer->hasPrevRow = true; +} + +static void handleUnprocessedRow(SLocalReducer *pLocalReducer, SSqlCmd *pCmd, tFilePage *tmpPages) { + if (pLocalReducer->hasUnprocessedRow) { + for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, j); + + tVariantAssign(&pLocalReducer->pCtx[j].param[0], &pExpr->param[0]); + aAggs[pExpr->sqlFuncId].init(&pLocalReducer->pCtx[j]); + + pLocalReducer->pCtx[j].currentStage = SECONDARY_STAGE_MERGE; + pLocalReducer->pCtx[j].numOfIteratedElems = 0; + aAggs[pExpr->sqlFuncId].distSecondaryMergeFunc(&pLocalReducer->pCtx[j]); + } + + pLocalReducer->hasUnprocessedRow = false; + + // copy to previous temp buffer + savePrevRecord(pLocalReducer, tmpPages); + } +} + +static int64_t getNumOfResultLocal(SSqlCmd *pCmd, SQLFunctionCtx *pCtx) { + int64_t maxOutput = 0; + + for (int32_t j = 0; j < pCmd->exprsInfo.numOfExprs; ++j) { + int32_t functionId = tscSqlExprGet(pCmd, j)->sqlFuncId; + + /* + * ts, tag, tagprj function can not decide the output number of current query + * the number of output result is decided by main output + */ + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) { + continue; + } + + if (maxOutput < pCtx[j].numOfOutputElems) { + maxOutput = pCtx[j].numOfOutputElems; + } + } + return maxOutput; +} + +/* + * in handling the to/bottom query, which produce more than one rows result, + * the tsdb_func_tags only fill the first row of results, the remain rows need to + * filled with the same result, which is the tags, specified in group by clause + */ +static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReducer *pLocalReducer) { + int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupbyCols; + + int32_t maxBufSize = 0; + for (int32_t k = startIndex; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + if (maxBufSize < pExpr->resBytes) { + maxBufSize = pExpr->resBytes; + } + assert(pExpr->sqlFuncId == TSDB_FUNC_TAG); + } + + assert(maxBufSize >= 0); + + char *buf = malloc((size_t)maxBufSize); + for (int32_t k = startIndex; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + int32_t inc = numOfRes - 1; // tsdb_func_tag function only produce one row of result + memset(buf, 0, (size_t)maxBufSize); + + SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k]; + memcpy(buf, pCtx->aOutputBuf, (size_t)pCtx->outputBytes); + + for (int32_t i = 0; i < inc; ++i) { + pCtx->aOutputBuf += pCtx->outputBytes; + memcpy(pCtx->aOutputBuf, buf, (size_t)pCtx->outputBytes); + } + } + + free(buf); +} + +int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { + for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + aAggs[pExpr->sqlFuncId].xFinalize(&pLocalReducer->pCtx[k]); + } + + pLocalReducer->hasPrevRow = false; + + int32_t numOfRes = (int32_t)getNumOfResultLocal(pCmd, pLocalReducer->pCtx); + pLocalReducer->pResultBuf->numOfElems += numOfRes; + + fillMultiRowsOfTagsVal(pCmd, numOfRes, pLocalReducer); + return numOfRes; +} + 
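+/*
+ * Illustrative note (a sketch, not part of the original change): the tag replication performed by
+ * fillMultiRowsOfTagsVal above can be pictured for a single tag output column with the minimal
+ * helper below. The name replicateTagValue is hypothetical and assumes <string.h>/<stdint.h>; it
+ * only shows the copy pattern in which row 0 of the column, produced once by the TSDB_FUNC_TAG
+ * function, is duplicated into the remaining (numOfRes - 1) rows of a top/bottom result:
+ *
+ *   static void replicateTagValue(char *aOutputBuf, int32_t outputBytes, int32_t numOfRes) {
+ *     // row 0 already holds the tag value; copy it into rows 1 .. numOfRes-1
+ *     for (int32_t i = 1; i < numOfRes; ++i) {
+ *       memcpy(aOutputBuf + i * outputBytes, aOutputBuf, (size_t)outputBytes);
+ *     }
+ *   }
+ */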
+/* + * points merge: + * points are merged according to the sort info, which is tags columns and timestamp column. + * In case of points without either tags columns or timestamp, such as + * results generated by simple aggregation function, we merge them all into one points + * *Exception*: column projection query, required no merge procedure + */ +bool needToMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpPages) { + int32_t ret = 0; // merge all result by default + int16_t functionId = tscSqlExprGet(pCmd, 0)->sqlFuncId; + + if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { // column projection query + ret = 1; // disable merge procedure + } else { + tOrderDescriptor *pDesc = pLocalReducer->pDesc; + if (pDesc->orderIdx.numOfOrderedCols > 0) { + if (pDesc->tsOrder == TSQL_SO_ASC) { // asc + // todo refactor comparator + ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpPages->data); + } else { // desc + ret = compare_d(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpPages->data); + } + } + } + + /* if ret == 0, means the result belongs to the same group */ + return (ret == 0); +} + +void savePreGroupNumOfRes(SSqlRes *pRes) { + // pRes->numOfGroups += 1; + // pRes->pGroupRec = realloc(pRes->pGroupRec, + // pRes->numOfGroups*sizeof(SResRec)); + // + // pRes->pGroupRec[pRes->numOfGroups-1].numOfRows = pRes->numOfRows; + // pRes->pGroupRec[pRes->numOfGroups-1].numOfTotal = pRes->numOfTotal; +} + +void doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, + bool doneOuput) { // there are merged results in buffer, flush to client + SSqlCmd * pCmd = &pSql->cmd; + SSqlRes * pRes = &pSql->res; + tFilePage *pResBuf = pLocalReducer->pResultBuf; + tColModel *pModel = pLocalReducer->resColModel; + + tColModelCompress(pModel, pResBuf, pModel->maxCapacity); + memcpy(pLocalReducer->pBufForInterpo, pResBuf->data, pLocalReducer->nResultBufSize); + +#ifdef _DEBUG_VIEW + printf("final result before interpo:\n"); + tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->numOfElems, pResBuf->numOfElems); +#endif + + SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; + int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupbyCols; + + for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupbyCols; ++i) { + memcpy(pInterpoInfo->pTags[i], + pLocalReducer->pBufForInterpo + pModel->colOffset[startIndex + i] * pResBuf->numOfElems, + pModel->pFields[startIndex + i].bytes); + } + + taosInterpoSetStartInfo(&pLocalReducer->interpolationInfo, pResBuf->numOfElems, pCmd->interpoType); + doInterpolateResult(pSql, pLocalReducer, doneOuput); + + pRes->code = TSDB_CODE_SUCCESS; +} + +void resetOutputBuf(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + pLocalReducer->pCtx[i].aOutputBuf = + pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pLocalReducer->resColModel->maxCapacity; + } + + memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage)); +} + +static void setUpForNewGroupRes(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { + /* + * In handling data in other groups, we need to reset the interpolation information for a new group data + */ + pRes->numOfRows = 0; + pRes->numOfTotal = 0; + pCmd->limit.offset = pLocalReducer->offset; + + if (pCmd->interpoType != TSDB_INTERPO_NONE) { + /* 
for group result interpolation, do not return if not data is generated */ + int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime; + int64_t newTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit); + + taosInitInterpoInfo(&pLocalReducer->interpolationInfo, pCmd->order.order, newTime, + pCmd->groupbyExpr.numOfGroupbyCols, pLocalReducer->rowSize); + } +} + +int32_t tscLocalDoReduce(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed + tscTrace("%s call the drop local reducer", __FUNCTION__); + + tscDestroyLocalReducer(pSql); + pRes->numOfRows = 0; + pRes->row = 0; + return 0; + } + + pRes->row = 0; + pRes->numOfRows = 0; + + SLocalReducer *pLocalReducer = pRes->pLocalReducer; + + // set the local reduce in progress + int32_t prevStatus = + __sync_val_compare_and_swap_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS); + if (prevStatus != TSC_LOCALREDUCE_READY || pLocalReducer == NULL) { + assert(prevStatus == TSC_LOCALREDUCE_TOBE_FREED); + /* it is in tscDestroyLocalReducer function already */ + return 0; + } + + SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo; + tFilePage * tmpPages = pLocalReducer->pTempBuffer; + + bool prevGroupDone = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow; + + if ((pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted && !pLocalReducer->hasPrevRow) || + pLocalReducer->pLocalDataSrc[0] == NULL || prevGroupDone) { + /* if interpoType == TSDB_INTERPO_NONE, return directly */ + if (pCmd->interpoType != TSDB_INTERPO_NONE) { + int64_t etime = (pCmd->stime < pCmd->etime) ? pCmd->etime : pCmd->stime; + + etime = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit); + int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, NULL, 0, pCmd->nAggTimeInterval, etime, + pLocalReducer->resColModel->maxCapacity); + if (rows > 0) { // do interpo + doInterpolateResult(pSql, pLocalReducer, true); + } + } + + /* numOfRows == 0, means no interpolation results are generated yet */ + if (pRes->numOfRows == 0) { + /* local reduce is completed */ + if ((pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted) && (!pLocalReducer->hasUnprocessedRow)) { + pLocalReducer->status = TSC_LOCALREDUCE_READY; + // set the flag, taos_free_result can release this result. + return 0; + } else { + /* start for process result for a new group */ + savePreGroupNumOfRes(pRes); + setUpForNewGroupRes(pRes, pCmd, pLocalReducer); + } + } else { + pLocalReducer->status = TSC_LOCALREDUCE_READY; + // set the flag, taos_free_result can release this result. 
+ return 0; + } + } + + if (taosHasNoneInterpoPoints(pInterpoInfo)) { + assert(pCmd->interpoType != TSDB_INTERPO_NONE); + + tFilePage *pFinalDataPage = pLocalReducer->pResultBuf; + int64_t etime = *(int64_t *)(pFinalDataPage->data + TSDB_KEYSIZE * (pInterpoInfo->numOfRawDataInRows - 1)); + + int32_t remain = taosNumOfRemainPoints(pInterpoInfo); + TSKEY ekey = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit); + int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, (TSKEY *)pLocalReducer->pBufForInterpo, remain, + pCmd->nAggTimeInterval, ekey, pLocalReducer->resColModel->maxCapacity); + if (rows > 0) { // do interpo + doInterpolateResult(pSql, pLocalReducer, false); + } + + pLocalReducer->status = TSC_LOCALREDUCE_READY; + // set the flag, taos_free_result can release this result. + return 0; + } + + SLoserTreeInfo *pTree = pLocalReducer->pLoserTree; + + // clear buffer + handleUnprocessedRow(pLocalReducer, pCmd, tmpPages); + tColModel *pModel = pLocalReducer->pDesc->pSchema; + + while (1) { + _reduce_retrieve: + if (pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted) { + pRes->numOfRows = 0; + break; + } + +#ifdef _DEBUG_VIEW + printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index); +#endif + assert((pTree->pNode[0].index < pLocalReducer->numOfBuffer) && (pTree->pNode[0].index >= 0) && + tmpPages->numOfElems == 0); + + // chosen from loser tree + SLocalDataSrc *pOneDataSrc = pLocalReducer->pLocalDataSrc[pTree->pNode[0].index]; + + tColModelAppend(pModel, tmpPages, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1, + pOneDataSrc->pMemBuffer->pColModel->maxCapacity); + +#if defined(_DEBUG_VIEW) + printf("chosen row:\t"); + SSrcColumnInfo colInfo[256] = {0}; + tscGetSrcColumnInfo(colInfo, pCmd); + + tColModelDisplayEx(pModel, tmpPages->data, tmpPages->numOfElems, pModel->maxCapacity, colInfo); +#endif + if (pLocalReducer->discard) { + assert(pLocalReducer->hasUnprocessedRow == false); + + /* current record belongs to the same group of previous record, need to discard it */ + if (isSameGroupOfPrev(pCmd, pLocalReducer, pLocalReducer->discardData->data, tmpPages)) { + tmpPages->numOfElems = 0; + pOneDataSrc->rowIdx += 1; + + loadDataIntoMemAndAdjustLoserTree(pLocalReducer, pOneDataSrc, pTree); + /* all inputs are exhausted, abort current process */ + if (pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted) { + break; + } + + /* since it belongs to the same group, ignore following records */ + continue; + } else { + pLocalReducer->discard = false; + pLocalReducer->discardData->numOfElems = 0; + + savePreGroupNumOfRes(pRes); + setUpForNewGroupRes(pRes, pCmd, pLocalReducer); + } + } + + if (pLocalReducer->hasPrevRow) { + if (needToMerge(pCmd, pLocalReducer, tmpPages)) { // belong to the group of the previous row + for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, j); + tVariantAssign(&pLocalReducer->pCtx[j].param[0], &pExpr->param[0]); + + aAggs[pExpr->sqlFuncId].distSecondaryMergeFunc(&pLocalReducer->pCtx[j]); + } + + // copy to buffer + savePrevRecord(pLocalReducer, tmpPages); + } else { // reduce the previous is completed, start a new one + int32_t numOfRes = finalizeRes(pCmd, pLocalReducer); + + bool sameGroup = isSameGroupOfPrev(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpPages); + tFilePage *pResBuf = pLocalReducer->pResultBuf; + + /* + * if the previous group does NOTE generate any result + * (pResBuf->numOfElems == 0), + * continue to process results 
instead of return results. + */ + if ((!sameGroup && pResBuf->numOfElems > 0) || + (pResBuf->numOfElems == pLocalReducer->resColModel->maxCapacity)) { + // does not belong to the same group + assert(pResBuf->numOfElems > 0); + + doGenerateFinalResults(pSql, pLocalReducer, !sameGroup); + + if (pLocalReducer->discard && sameGroup) { + /* this row needs to discard, since it belongs to the group of previous */ + pLocalReducer->hasUnprocessedRow = false; + tmpPages->numOfElems = 0; + } else { + pLocalReducer->hasUnprocessedRow = true; + } + + resetOutputBuf(pCmd, pLocalReducer); + pOneDataSrc->rowIdx += 1; + + /* here we do not check the return value */ + loadDataIntoMemAndAdjustLoserTree(pLocalReducer, pOneDataSrc, pTree); + assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS); + + if (pRes->numOfRows == 0) { + handleUnprocessedRow(pLocalReducer, pCmd, tmpPages); + + if (!sameGroup) { + /* previous group is done, we start a new one by continuing to + * retrieve data */ + savePreGroupNumOfRes(pRes); + setUpForNewGroupRes(pRes, pCmd, pLocalReducer); + } + + goto _reduce_retrieve; + } else { + /* + * if next record belongs to a new group, we do not handle this record here. + * We start the process in a new round. + */ + if (sameGroup) { + handleUnprocessedRow(pLocalReducer, pCmd, tmpPages); + } + } + + pLocalReducer->status = TSC_LOCALREDUCE_READY; + // set the flag, taos_free_result can release this result. + return 0; + } else { // result buffer is not full + for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, k); + + pLocalReducer->pCtx[k].aOutputBuf += pLocalReducer->pCtx[k].outputBytes * numOfRes; + if (pExpr->sqlFuncId == TSDB_FUNC_TOP_DST || pExpr->sqlFuncId == TSDB_FUNC_BOTTOM_DST) { + pLocalReducer->pCtx[k].ptsOutputBuf = + ((char *)pLocalReducer->pCtx[k].ptsOutputBuf + TSDB_KEYSIZE * numOfRes); + } + + /* set the parameters for the SQLFunctionCtx */ + tVariantAssign(&pLocalReducer->pCtx[k].param[0], &pExpr->param[0]); + + aAggs[pExpr->sqlFuncId].init(&pLocalReducer->pCtx[k]); + pLocalReducer->pCtx[k].currentStage = SECONDARY_STAGE_MERGE; + aAggs[pExpr->sqlFuncId].distSecondaryMergeFunc(&pLocalReducer->pCtx[k]); + } + + savePrevRecord(pLocalReducer, tmpPages); + } + } + } else { // put to previous input row for comparision + for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, j); + + tVariantAssign(&pLocalReducer->pCtx[j].param[0], &pExpr->param[0]); + pLocalReducer->pCtx[j].numOfIteratedElems = 0; + pLocalReducer->pCtx[j].currentStage = 0; + + aAggs[pExpr->sqlFuncId].init(&pLocalReducer->pCtx[j]); + pLocalReducer->pCtx[j].currentStage = SECONDARY_STAGE_MERGE; + + aAggs[pExpr->sqlFuncId].distSecondaryMergeFunc(&pLocalReducer->pCtx[j]); + } + + // copy to buffer + savePrevRecord(pLocalReducer, tmpPages); + } + + pOneDataSrc->rowIdx += 1; + + loadDataIntoMemAndAdjustLoserTree(pLocalReducer, pOneDataSrc, pTree); + if (pLocalReducer->numOfCompleted == pLocalReducer->numOfBuffer) { + break; + } + } + + if (pLocalReducer->hasPrevRow) { + finalizeRes(pCmd, pLocalReducer); + } + + if (pLocalReducer->pResultBuf->numOfElems) { + doGenerateFinalResults(pSql, pLocalReducer, true); + } + + assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS && pRes->row == 0); + pLocalReducer->status = TSC_LOCALREDUCE_READY; + // set the flag, taos_free_result can release this result. 
+ + return TSDB_CODE_SUCCESS; +} + +void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) { + SSqlRes *pRes = &pObj->res; + if (pRes->pLocalReducer != NULL) { + tscDestroyLocalReducer(pObj); + } + + pRes->qhandle = 1; // hack to pass the safety check in fetch_row function + pRes->numOfRows = 0; + pRes->row = 0; + + pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet + pRes->pLocalReducer = (SLocalReducer *)calloc(1, sizeof(SLocalReducer)); + + /* + * we need one additional byte space the sprintf function needs one additional space to put '\0' at the end of string + */ + size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1; + pRes->pLocalReducer->pResultBuf = (tFilePage *)calloc(1, allocSize); + + pRes->pLocalReducer->pResultBuf->numOfElems = numOfRes; + pRes->data = pRes->pLocalReducer->pResultBuf->data; +} diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c new file mode 100644 index 000000000000..b9d4daee331f --- /dev/null +++ b/src/client/src/tscServer.c @@ -0,0 +1,2989 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +#include "tcache.h" +#include "trpc.h" +#include "tscProfile.h" +#include "tscSecondaryMerge.h" +#include "tscUtil.h" +#include "tschemautil.h" +#include "tsclient.h" +#include "tsocket.h" +#include "tsql.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +#define TSC_MGMT_VNODE 999 + +int tsMasterIndex = 0; +int tsSlaveIndex = 1; +char tsServerIpStr[128] = "127.0.0.1"; +uint32_t tsServerIp; + +int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql); +int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql); +void (*tscUpdateVnodeMsg[TSDB_SQL_MAX])(SSqlObj *pSql); +void tscProcessActivityTimer(void *handle, void *tmrId); +int tscKeepConn[TSDB_SQL_MAX] = {0}; + +static int32_t minMsgSize() { return tsRpcHeadSize + sizeof(STaosDigest); } + +void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { + STscObj *pObj = (STscObj *)param; + if (pObj == NULL) return; + if (pObj != pObj->signature) { + tscError("heart beat msg, pObj:%p, signature:%p invalid", pObj, pObj->signature); + return; + } + + SSqlObj *pSql = pObj->pHb; + SSqlRes *pRes = &pSql->res; + + if (code == 0) { + SHeartBeatRsp *pRsp = (SHeartBeatRsp *)pRes->pRsp; + if (pRsp->killConnection) { + tscKillConnection(pObj); + } else { + if (pRsp->queryId) tscKillQuery(pObj, pRsp->queryId); + if (pRsp->streamId) tscKillStream(pObj, pRsp->streamId); + } + } else { + tscTrace("heart beat failed, code:%d", code); + } + + taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer); +} + +void tscProcessActivityTimer(void *handle, void *tmrId) { + STscObj *pObj = (STscObj *)handle; + + if (pObj == NULL) return; + if (pObj->signature != pObj) return; + if (pObj->pTimer != tmrId) return; + + if (pObj->pHb == NULL) { + SSqlObj *pSql = (SSqlObj *)malloc(sizeof(SSqlObj)); + memset(pSql, 0, 
sizeof(SSqlObj)); + pSql->fp = tscProcessHeartBeatRsp; + pSql->cmd.command = TSDB_SQL_HB; + tscAllocPayloadWithSize(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE); + pSql->param = pObj; + pSql->pTscObj = pObj; + pSql->signature = pSql; + pObj->pHb = pSql; + tscTrace("%p pHb is allocated, pObj:%p", pObj->pHb, pObj); + } + + if (tscShouldFreeHeatBeat(pObj->pHb)) { + tscTrace("%p free HB object and release connection, pConn:%p", pObj, pObj->pHb->thandle); + taosCloseRpcConn(pObj->pHb->thandle); + + tscFreeSqlObj(pObj->pHb); + tscCloseTscObj(pObj); + return; + } + + tscProcessSql(pObj->pHb); +} + +void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { + STscObj *pTscObj = pSql->pTscObj; + + if (pSql->retry < 1) { + *pCode = 0; + pSql->retry++; + void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, TSC_MGMT_VNODE, pTscObj->user); + + if (thandle == NULL) { + SRpcConnInit connInit; + memset(&connInit, 0, sizeof(connInit)); + connInit.cid = 0; + connInit.sid = 0; + connInit.meterId = pSql->pTscObj->user; + connInit.peerId = 0; + connInit.shandle = pTscMgmtConn; + connInit.ahandle = pSql; + connInit.peerPort = tsMgmtShellPort; + connInit.spi = 1; + connInit.encrypt = 0; + connInit.secret = pSql->pTscObj->pass; + + connInit.peerIp = tsServerIpStr; + thandle = taosOpenRpcConn(&connInit, pCode); + } + + pSql->thandle = thandle; + pSql->ip = tsServerIp; + pSql->vnode = TSC_MGMT_VNODE; + } +} + +void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { + SVPeerDesc *pVPeersDesc = NULL; + static int vidIndex = 0; + STscObj * pTscObj = pSql->pTscObj; + + pSql->thandle = NULL; + + SSqlCmd *pCmd = &pSql->cmd; + if (UTIL_METER_IS_METRIC(pCmd)) { // multiple vnode query + int32_t idx = (pCmd->vnodeIdx > 0) ? pCmd->vnodeIdx - 1 : 0; + SVnodeSidList *vnodeList = tscGetVnodeSidList(pCmd->pMetricMeta, idx); + if (vnodeList != NULL) { + pVPeersDesc = vnodeList->vpeerDesc; + } + } else { + SMeterMeta *pMeta = pSql->cmd.pMeterMeta; + if (pMeta == NULL) { + tscError("%p pMeterMeta is NULL", pSql); + pSql->retry = pSql->maxRetry; + return; + } + pVPeersDesc = pMeta->vpeerDesc; + } + + if (pVPeersDesc == NULL) { + pSql->retry = pSql->maxRetry; + tscError("%p pVPeerDesc is NULL", pSql); + } + + while (pSql->retry < pSql->maxRetry) { + (pSql->retry)++; + + *pCode = 0; + void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, pVPeersDesc[0].vnode, pTscObj->user); + + if (thandle == NULL) { + SRpcConnInit connInit; + memset(&connInit, 0, sizeof(connInit)); + connInit.cid = vidIndex; + connInit.sid = 0; + connInit.spi = 0; + connInit.encrypt = 0; + connInit.meterId = pSql->pTscObj->user; + connInit.peerId = htonl((pVPeersDesc[0].vnode << TSDB_SHELL_VNODE_BITS)); + connInit.shandle = pVnodeConn; + connInit.ahandle = pSql; + connInit.peerIp = tsServerIpStr; + connInit.peerPort = tsVnodeShellPort; + thandle = taosOpenRpcConn(&connInit, pCode); + vidIndex = (vidIndex + 1) % tscNumOfThreads; + } + + pSql->thandle = thandle; + pSql->ip = tsServerIp; + pSql->vnode = pVPeersDesc[0].vnode; + break; + } +} + +int tscSendMsgToServer(SSqlObj *pSql) { + uint8_t code = TSDB_CODE_NETWORK_UNAVAIL; + + if (pSql->thandle == NULL) { + if (pSql->cmd.command < TSDB_SQL_MGMT) + tscGetConnToVnode(pSql, &code); + else + tscGetConnToMgmt(pSql, &code); + } + + if (pSql->thandle) { + tscTrace("%p msg:%s is sent to server", pSql, taosMsg[pSql->cmd.msgType]); + char *pStart = taosBuildReqHeader(pSql->thandle, pSql->cmd.msgType, pSql->cmd.payload); + if (pStart) { + if (tscUpdateVnodeMsg[pSql->cmd.command]) 
(*tscUpdateVnodeMsg[pSql->cmd.command])(pSql); + int ret = taosSendMsgToPeerH(pSql->thandle, pStart, pSql->cmd.payloadLen, pSql); + if (ret >= 0) code = 0; + tscTrace("%p send msg ret:%d code:%d sig:%p", pSql, ret, code, pSql->signature); + } + } + + return code; +} + +void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { + if (ahandle == NULL) return NULL; + + SIntMsg *pMsg = (SIntMsg *)msg; + SSqlObj *pSql = (SSqlObj *)ahandle; + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + int code = TSDB_CODE_NETWORK_UNAVAIL; + + if (pSql->signature != pSql) { + tscError("%p sql is already released, signature:%p", pSql, pSql->signature); + return NULL; + } + + if (pSql->thandle != thandle) { + tscError("%p thandle:%p is different from received:%p", pSql, pSql->thandle, thandle); + return NULL; + } + + tscTrace("%p msg:%p is received from server, pConn:%p", pSql, msg, thandle); + + if (pSql->freed || pObj->signature != pObj) { + tscTrace("%p sql is already released or DB connection is closed, freed:%d pObj:%p signature:%p", + pSql, pSql->freed, pObj, pObj->signature); + taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); + tscFreeSqlObj(pSql); + return ahandle; + } + + if (msg == NULL) { + tscTrace("%p no response from ip:0x%x", pSql, pSql->ip); + pSql->index++; + pSql->thandle = NULL; + + // todo taos_stop_query() in async model + /* + * in case of + * 1. query cancelled(pRes->code != TSDB_CODE_QUERY_CANCELLED), do NOT re-issue the + * request to server. + * 2. retrieve, do NOT re-issue the retrieve request since the qhandle may + * have been released by server + */ + if (pCmd->command != TSDB_SQL_FETCH && pCmd->command != TSDB_SQL_RETRIEVE && pCmd->command != TSDB_SQL_KILL_QUERY && + pRes->code != TSDB_CODE_QUERY_CANCELLED) { + code = tscSendMsgToServer(pSql); + if (code == 0) return NULL; + } + + // renew meter meta in case it is changed + if (pCmd->command < TSDB_SQL_FETCH && pRes->code != TSDB_CODE_QUERY_CANCELLED) { + // for fetch, it shall not renew meter meta + pSql->maxRetry = 2; + code = tscRenewMeterMeta(pSql, pCmd->name); + pRes->code = code; + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; + + if (pCmd->pMeterMeta) { + code = tscSendMsgToServer(pSql); + if (code == 0) return pSql; + } + } + } else { + if (pMsg->content[0] == TSDB_CODE_NOT_ACTIVE_SESSION || pMsg->content[0] == TSDB_CODE_NETWORK_UNAVAIL || + pMsg->content[0] == TSDB_CODE_INVALID_SESSION_ID) { + pSql->thandle = NULL; + taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); + + if (UTIL_METER_IS_METRIC(pCmd) && + (pMsg->content[0] == TSDB_CODE_INVALID_SESSION_ID || pMsg->content[0] == TSDB_CODE_NOT_ACTIVE_SESSION)) { + /* + * for metric query, in case of any meter missing during query, sub-query of metric query will failed, + * causing metric query failed, and return TSDB_CODE_METRICMETA_EXPIRED code to app + */ + tscTrace("%p invalid meters id cause metric query failed, code:%d", pSql, pMsg->content[0]); + code = TSDB_CODE_METRICMETA_EXPIRED; + } else if (pCmd->command == TSDB_SQL_CONNECT) { + code = TSDB_CODE_NETWORK_UNAVAIL; + } else if (pCmd->command == TSDB_SQL_HB) { + code = TSDB_CODE_NOT_READY; + } else { + tscTrace("%p it shall renew meter meta, code:%d", pSql, pMsg->content[0]); + pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; + + code = tscRenewMeterMeta(pSql, pCmd->name); + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; + + if (pCmd->pMeterMeta) { + code = tscSendMsgToServer(pSql); 
+ if (code == 0) return pSql; + } + } + + msg = NULL; + } + } + + pSql->retry = 0; + + if (msg) { + if (pCmd->command < TSDB_SQL_MGMT) { + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { + if (pCmd->pMeterMeta) // it may be deleted + pCmd->pMeterMeta->index = pSql->index; + } else { + int32_t idx = (pSql->cmd.vnodeIdx == 0) ? 0 : pSql->cmd.vnodeIdx - 1; + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pCmd->pMetricMeta, idx); + pVnodeSidList->index = pSql->index; + } + } else { + if (pCmd->command > TSDB_SQL_READ) + tsSlaveIndex = pSql->index; + else + tsMasterIndex = pSql->index; + } + } + + if (pSql->fp == NULL) sem_wait(&pSql->emptyRspSem); + + pRes->rspLen = 0; + if (pRes->code != TSDB_CODE_QUERY_CANCELLED) { + pRes->code = (code != TSDB_CODE_SUCCESS) ? code : TSDB_CODE_NETWORK_UNAVAIL; + } else { + tscTrace("%p query is cancelled, code:%d", pSql, pRes->code); + } + + if (msg && pRes->code != TSDB_CODE_QUERY_CANCELLED) { + assert(pMsg->msgType == pCmd->msgType + 1); + pRes->code = pMsg->content[0]; + pRes->rspType = pMsg->msgType; + pRes->rspLen = pMsg->msgLen - sizeof(SIntMsg); + pRes->pRsp = (char *)realloc(pRes->pRsp, pRes->rspLen); + if (pRes->rspLen) memcpy(pRes->pRsp, pMsg->content + 1, pRes->rspLen - 1); + + if (pRes->code == TSDB_CODE_DB_ALREADY_EXIST && pCmd->existsCheck && pRes->rspType == TSDB_MSG_TYPE_CREATE_DB_RSP) { + /* ignore the error information returned from mnode when set ignore flag in sql */ + pRes->code = TSDB_CODE_SUCCESS; + } + + tscTrace("%p cmd:%d code:%d rsp len:%d", pSql, pCmd->command, pRes->code, pRes->rspLen); + + /* + * There is not response callback function for submit response. + * The actual inserted number of points is the first number. + */ + if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT_RSP) { + pRes->numOfRows += *(int32_t *)pRes->pRsp; + } + } + + if (tscKeepConn[pCmd->command] == 0 || + (pRes->code != TSDB_CODE_SUCCESS && pRes->code != TSDB_CODE_ACTION_IN_PROGRESS)) { + if (pSql->thandle != NULL) { + taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); + pSql->thandle = NULL; + } + } + + if (pSql->fp == NULL) { + sem_post(&pSql->rspSem); + } else { + if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command]) + code = (*tscProcessMsgRsp[pCmd->command])(pSql); + + if (code != TSDB_CODE_ACTION_IN_PROGRESS) { + int command = pCmd->command; + void *taosres = tscKeepConn[command] ? pSql : NULL; + code = pRes->code ? -pRes->code : pRes->numOfRows; + + tscTrace("%p Async SQL result:%d taosres:%p", pSql, code, taosres); + + /* + * Whether to free sqlObj or not should be decided before call the user defined function, since + * this SqlObj may be freed in UDF, and reused by other threads before tscShouldFreeAsyncSqlObj + * called, in which case tscShouldFreeAsyncSqlObj checks an object which is actually allocated by other threads. + * + * If this block of memory is re-allocated for an insert thread, in which tscKeepConn[command] equals to 0, + * the tscShouldFreeAsyncSqlObj will success and tscFreeSqlObj free it immediately. 
+ */ + bool shouldFree = tscShouldFreeAsyncSqlObj(pSql); + if (command == TSDB_SQL_INSERT) { // handle multi-vnode insertion situation + (*pSql->fp)(pSql, taosres, code); + } else { + (*pSql->fp)(pSql->param, taosres, code); + } + + if (shouldFree) { + // If it is failed, all objects allocated during execution taos_connect_a should be released + if (command == TSDB_SQL_CONNECT) { + taos_close(pObj); + tscTrace("%p Async sql close failed connection", pSql); + } else { + tscFreeSqlObj(pSql); + tscTrace("%p Async sql is automatically freed", pSql); + } + } + } + } + + return ahandle; +} + +static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport); +static int tscLaunchMetricSubQueries(SSqlObj *pSql); + +int tscProcessSql(SSqlObj *pSql) { + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + tscTrace("%p SQL cmd:%d will be processed, name:%s", pSql, pSql->cmd.command, pSql->cmd.name); + + // whether don't judge 'isInsertFromFile' ? + if (pSql->cmd.command == TSDB_SQL_INSERT && pCmd->isInsertFromFile == 1) { + // pCmd->isInsertFromFile = 0; // lihui: can not clear the flag + return 0; + } + + pSql->retry = 0; + if (pSql->cmd.command < TSDB_SQL_MGMT) { + pSql->maxRetry = 2; + + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { + pSql->index = pCmd->pMeterMeta->index; + } else { + if (pSql->cmd.vnodeIdx == 0) { // it must be the parent SSqlObj for metric query + // do nothing + } else { + int32_t idx = pSql->cmd.vnodeIdx - 1; + SVnodeSidList *pSidList = tscGetVnodeSidList(pCmd->pMetricMeta, idx); + pSql->index = pSidList->index; + } + } + } else if (pSql->cmd.command < TSDB_SQL_LOCAL) { + pSql->index = pSql->cmd.command < TSDB_SQL_READ ? tsMasterIndex : tsSlaveIndex; + } else { // local handler + return (*tscProcessMsgRsp[pCmd->command])(pSql); + } + + int code = 0; + + if (tscIsTwoStageMergeMetricQuery(pSql)) { // query on metric + /* + * (ref. line: 964) + * Before this function returns from tscLaunchMetricSubQueries and continues, pSql may have been released at user + * program context after retrieving all data from vnodes. User function is called at tscRetrieveFromVnodeCallBack. + * + * when pSql being released, pSql->fp == NULL, it may pass the check of pSql->fp == NULL, + * which causes deadlock. So we keep it as local variable. + */ + void *fp = pSql->fp; + + if (tscLaunchMetricSubQueries(pSql) != TSDB_CODE_SUCCESS) { + return pRes->code; + } + + if (fp == NULL) { + sem_post(&pSql->emptyRspSem); + sem_wait(&pSql->rspSem); + + assert(pSql->cmd.vnodeIdx == 0); + sem_post(&pSql->emptyRspSem); + + // set the command flag must be after the semaphore been correctly set. 
+ pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; + } + + return pSql->res.code; + } else { + void *asyncFp = pSql->fp; + if (tscBuildMsg[pCmd->command](pSql) < 0) { // build msg failed + code = TSDB_CODE_APP_ERROR; + } else { + code = tscSendMsgToServer(pSql); + } + if (asyncFp) { + if (code != 0) { + pRes->code = code; + tscQueueAsyncRes(pSql); + } + return 0; + } + } + + if (code != 0) { + pRes->code = code; + return code; + } + + sem_wait(&pSql->rspSem); + + if (pRes->code == 0 && tscProcessMsgRsp[pCmd->command]) (*tscProcessMsgRsp[pCmd->command])(pSql); + + sem_post(&pSql->emptyRspSem); + + return pRes->code; +} + +int tscLaunchMetricSubQueries(SSqlObj *pSql) { + SSqlRes *pRes = &pSql->res; + + /* pRes->code check only serves in launching metric sub-queries */ + if (pRes->code == TSDB_CODE_QUERY_CANCELLED) { + pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; // enable the abort of kill metric function. + return pSql->res.code; + } + + tExtMemBuffer ** pMemoryBuf = NULL; + tOrderDescriptor *pDesc = NULL; + tColModel * pModel = NULL; + + pRes->qhandle = 1; // hack the qhandle check + + const uint32_t nBufferSize = (1 << 16); // 64KB + int32_t numOfVnodes = pSql->cmd.pMetricMeta->numOfVnodes; + assert(numOfVnodes > 0); + + int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize); + if (ret != 0) { + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + if (pSql->fp) { + tscQueueAsyncRes(pSql); + } + return pRes->code; + } + + pSql->pSubs = malloc(POINTER_BYTES * numOfVnodes); + pSql->numOfSubs = numOfVnodes; + + tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfVnodes); + int32_t * retrievedDoneRec = calloc(1, sizeof(int64_t) << 1); + int32_t * subStatusCode = &retrievedDoneRec[1]; + uint64_t *numOfTotalRetrievedPoints = (uint64_t *)&retrievedDoneRec[2]; + + pRes->code = TSDB_CODE_SUCCESS; + + for (int32_t i = 0; i < numOfVnodes; ++i) { + if (pRes->code == TSDB_CODE_QUERY_CANCELLED || pRes->code == TSDB_CODE_CLI_OUT_OF_MEMORY) { + /* + * during launch sub queries, if the master query is cancelled. + * the remain is ignored and set the retrieveDoneRec to the value of remaining + * not built sub-queries. So, the already issued sub queries can successfully free allocated resources. + */ + *retrievedDoneRec = (numOfVnodes - i); + + if (i == 0) { + /* + * if i > 0, at least one sub query is issued, the allocated resource is done by it when it completed. + */ + tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, nBufferSize); + free(retrievedDoneRec); + pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; + // enable the abort of kill metric function. 
+ return pSql->res.code; + } + break; + } + + SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport)); + trs->pExtMemBuffer = pMemoryBuf; + trs->pOrderDescriptor = pDesc; + trs->numOfFinished = retrievedDoneRec; + trs->code = subStatusCode; + trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); + trs->vnodeIdx = i + 1; + trs->numOfVnodes = numOfVnodes; + trs->pParentSqlObj = pSql; + trs->pFinalColModel = pModel; + trs->numOfTotalRetrievedPoints = numOfTotalRetrievedPoints; + + pthread_mutexattr_t mutexattr = {0}; + pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE_NP); + pthread_mutex_init(&trs->queryMutex, &mutexattr); + pthread_mutexattr_destroy(&mutexattr); + + SSqlObj *pNew = tscCreateSqlObjForSubquery(pSql, trs); + tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, pNew->cmd.vnodeIdx); + tscProcessSql(pNew); + } + + return TSDB_CODE_SUCCESS; +} + +static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) { + tscTrace("%p start to free subquery result", pSql); + + if (pSql->res.code == TSDB_CODE_SUCCESS) { + taos_free_result(pSql); + } + tfree(trsupport->localBuffer); + + pthread_mutex_unlock(&trsupport->queryMutex); + pthread_mutex_destroy(&trsupport->queryMutex); + + tfree(trsupport); +} + +static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t errCode) { + char buf[256] = {0}; + strerror_r(errno, buf, 256); + tscError("sub:%p failed to flush data to disk:reason:%s", tres, buf); + + *(trsupport->code) = -errCode; + trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; + + pthread_mutex_unlock(&trsupport->queryMutex); + + tscRetrieveFromVnodeCallBack(trsupport, tres, *(trsupport->code)); +} + +static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) { + SSqlObj *pPObj = trsupport->pParentSqlObj; + int32_t idx = trsupport->vnodeIdx; + + assert(pSql != NULL); + + /* retrieved in subquery failed. OR query cancelled in retrieve phase. */ + if (*trsupport->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) { + *trsupport->code = -(int)pPObj->res.code; + + /* + * kill current sub-query connection, which may retrieve data from vnodes; + * Here we get: pPObj->res.code == TSDB_CODE_QUERY_CANCELLED + */ + pSql->res.numOfRows = 0; + trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts + tscTrace("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", trsupport->pParentSqlObj, pSql, + trsupport->vnodeIdx, *trsupport->code); + } + + if (numOfRows >= 0) { + /* current query is successful, but other sub query failed, still abort current query. 
*/ + tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, idx); + tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", + pPObj, pSql, idx, *trsupport->code); + } else { + if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && *(trsupport->code) == TSDB_CODE_SUCCESS) { + /* + * current query failed, and the retry count is less than the available count, + * retry query clear previous retrieved data, then launch a new sub query + */ + tExtMemBufferClear(trsupport->pExtMemBuffer[idx - 1]); + + // clear local saved number of results + trsupport->localBuffer->numOfElems = 0; + + pthread_mutex_unlock(&trsupport->queryMutex); + + SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport); + tscTrace("%p sub:%p retrieve failed, code:%d, orderOfSub:%d, retry:%d, new SqlObj:%p", + trsupport->pParentSqlObj, pSql, numOfRows, idx, trsupport->numOfRetry, pNew); + + tscProcessSql(pNew); + return; + } else { + /* reach the maximum retry count, abort. */ + __sync_val_compare_and_swap_32(trsupport->code, TSDB_CODE_SUCCESS, numOfRows); + tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", + pPObj, pSql, numOfRows, idx, *trsupport->code); + } + } + + if (__sync_add_and_fetch_32(trsupport->numOfFinished, 1) < trsupport->numOfVnodes) { + return tscFreeSubSqlObj(trsupport, pSql); + } + + // all subqueries are failed + tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, trsupport->numOfVnodes, *trsupport->code); + pPObj->res.code = -(*trsupport->code); + + // release allocated resource + tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel, + trsupport->numOfVnodes); + + tfree(trsupport->numOfFinished); + tscFreeSubSqlObj(trsupport, pSql); + + if (pPObj->fp == NULL) { + // sync query, wait for the master SSqlObj to proceed + sem_wait(&pPObj->emptyRspSem); + sem_wait(&pPObj->emptyRspSem); + + sem_post(&pPObj->rspSem); + + pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; + } else { + // in async query model, no need to sync operation + if (pPObj->res.code != 0) { + tscQueueAsyncRes(pPObj); + } + } +} + +void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { + SRetrieveSupport *trsupport = (SRetrieveSupport *)param; + int32_t idx = trsupport->vnodeIdx; + SSqlObj * pPObj = trsupport->pParentSqlObj; + tOrderDescriptor *pDesc = trsupport->pOrderDescriptor; + + SSqlObj *pSql = (SSqlObj *)tres; + if (pSql == NULL) { + /* sql object has been released in error process, return immediately */ + tscTrace("%p subquery has been released, idx:%d, abort", pPObj, idx); + return; + } + + // query process and cancel query process may execute at the same time + pthread_mutex_lock(&trsupport->queryMutex); + + if (numOfRows < 0 || *(trsupport->code) < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) { + return tscHandleSubRetrievalError(trsupport, pSql, numOfRows); + } + + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + SVnodeSidList *vnodeInfo = tscGetVnodeSidList(pCmd->pMetricMeta, idx - 1); + SVPeerDesc * pSvd = &vnodeInfo->vpeerDesc[vnodeInfo->index]; + + if (numOfRows > 0) { + assert(pRes->numOfRows == numOfRows); + __sync_add_and_fetch_64(trsupport->numOfTotalRetrievedPoints, numOfRows); + + tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", + pPObj, pSql, pRes->numOfRows, *trsupport->numOfTotalRetrievedPoints, pSvd->ip, pSvd->vnode, idx); 
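+    /*
+     * Editor's note (descriptive only): after the optional _DEBUG_VIEW dump below, the rows
+     * received from this vnode are appended to its external memory buffer via saveToBuffer.
+     * A failed write is treated as running out of disk space and aborts further retries;
+     * otherwise the next batch is fetched asynchronously with taos_fetch_rows_a.
+     */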
+ +#ifdef _DEBUG_VIEW + printf("received data from vnode: %d rows\n", pRes->numOfRows); + SSrcColumnInfo colInfo[256] = {0}; + tscGetSrcColumnInfo(colInfo, &pPObj->cmd); + tColModelDisplayEx(pDesc->pSchema, pRes->data, pRes->numOfRows, pRes->numOfRows, colInfo); +#endif + int32_t ret = saveToBuffer(trsupport->pExtMemBuffer[idx - 1], pDesc, trsupport->localBuffer, pRes->data, + pRes->numOfRows, pCmd->groupbyExpr.orderType); + if (ret < 0) { + // set no disk space error info, and abort retry + tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE); + } else { + pthread_mutex_unlock(&trsupport->queryMutex); + taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param); + } + + } else { + // all data has been retrieved to client data in from current vnode is stored in cache and disk + uint32_t numOfRowsFromVnode = trsupport->pExtMemBuffer[idx - 1]->numOfAllElems + trsupport->localBuffer->numOfElems; + tscTrace("%p sub:%p all data retrieved from ip:%u,vid:%d, numOfRows:%d, orderOfSub:%d", + pPObj, pSql, pSvd->ip, pSvd->vnode, numOfRowsFromVnode, idx); + + tColModelCompress(pDesc->pSchema, trsupport->localBuffer, pDesc->pSchema->maxCapacity); + +#ifdef _DEBUG_VIEW + printf("%ld rows data flushed to disk:\n", trsupport->localBuffer->numOfElems); + SSrcColumnInfo colInfo[256] = {0}; + tscGetSrcColumnInfo(colInfo, &pPObj->cmd); + tColModelDisplayEx(pDesc->pSchema, trsupport->localBuffer->data, trsupport->localBuffer->numOfElems, + trsupport->localBuffer->numOfElems, colInfo); +#endif + + // each result for a vnode is ordered as an independant list, + // then used as an input of loser tree for disk-based merge routine + int32_t ret = tscFlushTmpBuffer(trsupport->pExtMemBuffer[idx - 1], pDesc, trsupport->localBuffer, + pCmd->groupbyExpr.orderType); + if (ret != 0) { + /* set no disk space error info, and abort retry */ + return tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_CLI_NO_DISKSPACE); + } + + if (__sync_add_and_fetch_32(trsupport->numOfFinished, 1) < trsupport->numOfVnodes) { + return tscFreeSubSqlObj(trsupport, pSql); + } + + // all sub-queries are returned, start to local merge process + pDesc->pSchema->maxCapacity = trsupport->pExtMemBuffer[idx - 1]->numOfElemsPerPage; + + tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%d,start to build loser tree", + pPObj, trsupport->numOfVnodes, *trsupport->numOfTotalRetrievedPoints); + + tscClearInterpInfo(&pPObj->cmd); + tscCreateLocalReducer(trsupport->pExtMemBuffer, trsupport->numOfVnodes, pDesc, trsupport->pFinalColModel, + &pPObj->cmd, &pPObj->res); + tscTrace("%p build loser tree completed", pPObj); + + pPObj->res.precision = pSql->res.precision; + pPObj->res.numOfRows = 0; + pPObj->res.row = 0; + + // only free once + free(trsupport->numOfFinished); + tscFreeSubSqlObj(trsupport, pSql); + + if (pPObj->fp == NULL) { + sem_wait(&pPObj->emptyRspSem); + sem_wait(&pPObj->emptyRspSem); + + sem_post(&pPObj->rspSem); + } else { + // set the command flag must be after the semaphore been correctly set. + pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; + if (pPObj->res.code == TSDB_CODE_SUCCESS) { + (*pPObj->fp)(pPObj->param, pPObj, 0); + } else { + tscQueueAsyncRes(pPObj); + } + } + } +} + +void tscKillMetricQuery(SSqlObj *pSql) { + if (!tscIsTwoStageMergeMetricQuery(pSql)) { + return; + } + + for (int i = 0; i < pSql->numOfSubs; ++i) { + SSqlObj *pSub = pSql->pSubs[i]; + + if (pSub == NULL || pSub->thandle == NULL) { + continue; + } + /* + * here, we cannot set the command = TSDB_SQL_KILL_QUERY. 
Otherwise, the sub-queries may not be released
+     * correctly and the master sql object of the metric query may reach an abnormal state.
+     */
+    pSql->pSubs[i]->res.code = TSDB_CODE_QUERY_CANCELLED;
+    taosStopRpcConn(pSql->pSubs[i]->thandle);
+  }
+
+  pSql->numOfSubs = 0;
+
+  /*
+   * 1. if the subqueries have been launched, fully or partially, we need to wait for the launched
+   *    queries to return, so that the allocated resources can be freed successfully.
+   * 2. if no subqueries have been launched yet, i.e. the metric query is still in the parse-sql stage,
+   *    just set res.code and return.
+   */
+  const int64_t MAX_WAITING_TIME = 10000;  // 10 Sec.
+  int64_t stime = taosGetTimestampMs();
+
+  while (pSql->cmd.command != TSDB_SQL_RETRIEVE_METRIC && pSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
+    taosMsleep(100);
+    if (taosGetTimestampMs() - stime > MAX_WAITING_TIME) {
+      break;
+    }
+  }
+
+  tscTrace("%p metric query is cancelled", pSql);
+}
+
+static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport) {
+  SSqlCmd *pCmd = &pSql->cmd;
+
+  SSqlObj *pNew = (SSqlObj *)calloc(1, sizeof(SSqlObj));
+
+  pSql->pSubs[trsupport->vnodeIdx - 1] = pNew;
+  pNew->pTscObj = pSql->pTscObj;
+  pNew->signature = pNew;
+  pNew->sqlstr = strdup(pSql->sqlstr);
+
+  memcpy(&pNew->cmd, pCmd, sizeof(SSqlCmd));
+  pNew->cmd.command = TSDB_SQL_SELECT;
+  pNew->cmd.payload = NULL;
+  pNew->cmd.allocSize = 0;
+
+  tscTagCondAssign(&pNew->cmd.tagCond, &pCmd->tagCond);
+
+  tscAllocPayloadWithSize(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
+  tscColumnInfoClone(&pCmd->colList, &pNew->cmd.colList);
+  tscFieldInfoClone(&pCmd->fieldsInfo, &pNew->cmd.fieldsInfo);
+  tscSqlExprClone(&pCmd->exprsInfo, &pNew->cmd.exprsInfo);
+
+  pNew->fp = tscRetrieveDataRes;
+
+  pNew->param = trsupport;
+  pNew->cmd.vnodeIdx = trsupport->vnodeIdx;
+
+  char key[TSDB_MAX_TAGS_LEN + 1] = {0};
+  tscGetMetricMetaCacheKey(&pNew->cmd, key);
+  pNew->cmd.pMetricMeta = taosGetDataFromCache(tscCacheHandle, key);
+  pNew->cmd.pMeterMeta = taosGetDataFromCache(tscCacheHandle, pCmd->name);
+
+  assert(pNew->cmd.pMeterMeta != NULL && pNew->cmd.pMetricMeta != NULL);
+
+  return pNew;
+}
+
+void tscRetrieveDataRes(void *param, TAOS_RES *tres, int retCode) {
+  SRetrieveSupport *trsupport = (SRetrieveSupport *)param;
+
+  SSqlObj *pSql = (SSqlObj *)tres;
+  int32_t idx = pSql->cmd.vnodeIdx;
+
+  SVnodeSidList *vnodeInfo = NULL;
+  if (pSql->cmd.pMetricMeta != NULL) {
+    vnodeInfo = tscGetVnodeSidList(pSql->cmd.pMetricMeta, idx - 1);
+  }
+
+  if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS || *trsupport->code != TSDB_CODE_SUCCESS) {
+    // the metric query has been killed or has failed; note: retCode must be less than 0
+    trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
+    if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS) {
+      retCode = -(int)(trsupport->pParentSqlObj->res.code);
+    } else {
+      retCode = (*trsupport->code);
+    }
+    tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", trsupport->pParentSqlObj, pSql,
+             trsupport->vnodeIdx, retCode);
+  }
+
+  /*
+   * if the query on one vnode has failed, all retrieve operations that occur later
+   * than this one are no longer necessary; we simply call tscRetrieveFromVnodeCallBack
+   * to abort the current and the remaining retrieve processes.
+   * Note: thread safety is required.
+ */ + if (retCode != TSDB_CODE_SUCCESS) { + if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) { + tscTrace("%p sub:%p reach the max retry count,set global code:%d", trsupport->pParentSqlObj, pSql, retCode); + __sync_val_compare_and_swap_32(trsupport->code, 0, retCode); + } else { // does not reach the maximum retry count, go on + SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport); + tscTrace("%p sub:%p failed code:%d, retry:%d, new SqlObj:%p", trsupport->pParentSqlObj, pSql, retCode, + trsupport->numOfRetry, pNew); + + tscProcessSql(pNew); + return; + } + } + + if (*(trsupport->code) != TSDB_CODE_SUCCESS) { // failed, abort + if (vnodeInfo != NULL) { + tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, + vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, + trsupport->vnodeIdx, *(trsupport->code)); + } else { + tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, + trsupport->vnodeIdx, *(trsupport->code)); + } + + tscRetrieveFromVnodeCallBack(param, tres, *(trsupport->code)); + } else { // success, proceed to retrieve data from dnode + tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, + vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, + trsupport->vnodeIdx); + + taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param); + } +} + +int tscBuildRetrieveMsg(SSqlObj *pSql) { + char *pMsg, *pStart; + int msgLen = 0; + + pStart = pSql->cmd.payload + tsRpcHeadSize; + pMsg = pStart; + + *((uint64_t *)pMsg) = pSql->res.qhandle; + pMsg += 8; + *pMsg = pSql->cmd.type; + pMsg += 1; + + msgLen = pMsg - pStart; + pSql->cmd.payloadLen = msgLen; + pSql->cmd.msgType = TSDB_MSG_TYPE_RETRIEVE; + + return msgLen; +} + +void tscUpdateVnodeInSubmitMsg(SSqlObj *pSql) { + SShellSubmitMsg *pShellMsg; + char * pMsg; + SMeterMeta * pMeterMeta = pSql->cmd.pMeterMeta; + + pMsg = pSql->cmd.payload + tsRpcHeadSize; + + pShellMsg = (SShellSubmitMsg *)pMsg; + pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); + tscTrace("%p update submit msg vnode:%d", pSql, htons(pShellMsg->vnode)); +} + +int tscBuildSubmitMsg(SSqlObj *pSql) { + SShellSubmitMsg *pShellMsg; + char * pMsg, *pStart; + int msgLen = 0; + SMeterMeta * pMeterMeta = pSql->cmd.pMeterMeta; + + pStart = pSql->cmd.payload + tsRpcHeadSize; + pMsg = pStart; + + pShellMsg = (SShellSubmitMsg *)pMsg; + pShellMsg->import = pSql->cmd.order.order; + pShellMsg->vnode = htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode); + pShellMsg->numOfSid = htonl(pSql->cmd.count); /* number of meters to be inserted */ + + pMsg += sizeof(SShellSubmitMsg); + + /* + * pSql->cmd.payloadLen is set during parse sql routine, so we do not use it here + */ + pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT; + tscTrace("%p update submit msg vnode:%d", pSql, htons(pShellMsg->vnode)); + + return msgLen; +} + +void tscUpdateVnodeInQueryMsg(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + char * pStart = pCmd->payload + tsRpcHeadSize; + + SQueryMeterMsg *pQueryMsg = (SQueryMeterMsg *)pStart; + + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { // pSchema == NULL, query on meter + SMeterMeta *pMeterMeta = pCmd->pMeterMeta; + pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); + } else { // query on metric + SMetricMeta * pMetricMeta = pCmd->pMetricMeta; + SVnodeSidList *pVnodeSidList = 
tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx - 1); + pQueryMsg->vnode = htons(pVnodeSidList->vpeerDesc[pSql->index].vnode); + } +} + +/* + * for meter query, simply return the size <= 1k + * for metric query, estimate size according to meter tags + */ +static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) { + const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5; + int32_t srcColListSize = pCmd->numOfCols * sizeof(SColumnFilterMsg); + + int32_t exprSize = sizeof(SSqlFuncExprMsg) * pCmd->fieldsInfo.numOfOutputCols; + + // meter query without tags values + if (!UTIL_METER_IS_METRIC(pCmd)) { + return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryMeterMsg) + srcColListSize + exprSize; + } + + SMetricMeta *pMetricMeta = pCmd->pMetricMeta; + + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx - 1); + + int32_t meterInfoSize = (pMetricMeta->tagLen + sizeof(SMeterSidExtInfo)) * pVnodeSidList->numOfSids; + int32_t outputColumnSize = pCmd->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg); + + return meterInfoSize + outputColumnSize + srcColListSize + exprSize + MIN_QUERY_MSG_PKT_SIZE; +} + +int tscBuildQueryMsg(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + + int32_t size = tscEstimateQueryMsgSize(pCmd); + tscAllocPayloadWithSize(pCmd, size); + + char *pStart = pCmd->payload + tsRpcHeadSize; + + SMeterMeta * pMeterMeta = pCmd->pMeterMeta; + SMetricMeta *pMetricMeta = pCmd->pMetricMeta; + + SQueryMeterMsg *pQueryMsg = (SQueryMeterMsg *)pStart; + + int32_t msgLen = 0; + int32_t numOfMeters = 0; + + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { // pSchema == NULL, query on meter + numOfMeters = 1; + + tscTrace("%p query on vnode: %d, number of sid:%d, meter id: %s", pSql, + pMeterMeta->vpeerDesc[pMeterMeta->index].vnode, 1, pCmd->name); + + pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode); + pQueryMsg->uid = pMeterMeta->uid; + pQueryMsg->numOfTagsCols = 0; + } else { // query on metric + SMetricMeta *pMetricMeta = pCmd->pMetricMeta; + if (pCmd->vnodeIdx <= 0) { + tscError("%p error vnodeIdx:%d", pSql, pCmd->vnodeIdx); + return -1; + } + + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx - 1); + uint32_t vnodeId = pVnodeSidList->vpeerDesc[pVnodeSidList->index].vnode; + + numOfMeters = pVnodeSidList->numOfSids; + if (numOfMeters <= 0) { + tscError("%p vid:%d,error numOfMeters in query message:%d", pSql, vnodeId, numOfMeters); + return -1; // error + } + + tscTrace("%p query on vid:%d, number of sid:%d", pSql, vnodeId, numOfMeters); + pQueryMsg->vnode = htons(vnodeId); + } + + pQueryMsg->numOfSids = htonl(numOfMeters); + pQueryMsg->numOfTagsCols = htons(pCmd->numOfReqTags); + + if (pCmd->order.order == TSQL_SO_ASC) { + pQueryMsg->skey = htobe64(pCmd->stime); + pQueryMsg->ekey = htobe64(pCmd->etime); + } else { + pQueryMsg->skey = htobe64(pCmd->etime); + pQueryMsg->ekey = htobe64(pCmd->stime); + } + + pQueryMsg->num = htonl(0); + pQueryMsg->order = htons(pCmd->order.order); + pQueryMsg->orderColId = htons(pCmd->order.orderColId); + + pQueryMsg->interpoType = htons(pCmd->interpoType); + + pQueryMsg->limit = htobe64(pCmd->limit.limit); + pQueryMsg->offset = htobe64(pCmd->limit.offset); + + pQueryMsg->numOfCols = htons(pCmd->colList.numOfCols); + + if (pCmd->colList.numOfCols <= 0) { + tscError("%p illegal value of numOfCols in query msg: %d", pSql, pMeterMeta->numOfColumns); + return -1; + } + + if (pMeterMeta->numOfTags < 0) { + tscError("%p illegal value of numOfTagsCols in query msg: %d", pSql, 
pMeterMeta->numOfTags); + return -1; + } + + pQueryMsg->nAggTimeInterval = htobe64(pCmd->nAggTimeInterval); + pQueryMsg->intervalTimeUnit = pCmd->intervalTimeUnit; + if (pCmd->nAggTimeInterval < 0) { + tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pCmd->nAggTimeInterval); + return -1; + } + + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { // query on meter + assert(pCmd->groupbyExpr.numOfGroupbyCols == 0); + pQueryMsg->tagLength = 0; + } else { // query on metric + + if (pCmd->groupbyExpr.numOfGroupbyCols > 0) { + pQueryMsg->numOfGroupbyCols = htons(pCmd->groupbyExpr.numOfGroupbyCols); + if (pCmd->groupbyExpr.numOfGroupbyCols < 0) { + tscError("%p illegal value of numOfGroupbyCols in query msg: %d", pSql, pCmd->groupbyExpr.numOfGroupbyCols); + return -1; + } + } else { // no group by clause + pQueryMsg->numOfGroupbyCols = 0; + } + pQueryMsg->tagLength = htons(pMetricMeta->tagLen); + } + + pQueryMsg->metricQuery = htons(pCmd->metricQuery); + pQueryMsg->numOfOutputCols = htons(pCmd->fieldsInfo.numOfOutputCols); + + if (pCmd->fieldsInfo.numOfOutputCols < 0) { + tscError("%p illegal value of number of output columns in query msg: %d", pSql, pCmd->fieldsInfo.numOfOutputCols); + return -1; + } + + // set column list ids + char *pMsg = (char *)(pQueryMsg->colList); + char *pBinaryBuf = pMsg + sizeof(pQueryMsg->colList[0]) * pCmd->colList.numOfCols; + + SSchema *pSchema = tsGetSchema(pMeterMeta); + + for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { + SColumnBase *pCol = tscColumnInfoGet(pCmd, i); + SSchema * pColSchema = &pSchema[pCol->colIndex]; + + if (pCol->colIndex >= pMeterMeta->numOfColumns || pColSchema->type < TSDB_DATA_TYPE_BOOL || + pColSchema->type > TSDB_DATA_TYPE_NCHAR) { + tscError("%p vid:%d sid:%d id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s", + pSql, htons(pQueryMsg->vnode), pMeterMeta->sid, pCmd->name, pMeterMeta->numOfColumns, pCol->colIndex, + pColSchema->name); + + return 0; // 0 means build msg failed + } + + pQueryMsg->colList[i].colId = htons(pColSchema->colId); + pQueryMsg->colList[i].bytes = htons(pColSchema->bytes); + + pQueryMsg->colList[i].type = htons(pColSchema->type); + + pQueryMsg->colList[i].filterOn = htons(pCol->filterOn); + pQueryMsg->colList[i].filterOnBinary = htons(pCol->filterOnBinary); + + if (pCol->filterOn && pCol->filterOnBinary) { + pQueryMsg->colList[i].len = htobe64(pCol->len); + memcpy(pBinaryBuf, (void *)pCol->pz, pCol->len + 1); + pBinaryBuf += pCol->len + 1; + } else { + pQueryMsg->colList[i].lowerBndi = htobe64(pCol->lowerBndi); + pQueryMsg->colList[i].upperBndi = htobe64(pCol->upperBndi); + } + + pQueryMsg->colList[i].lowerRelOptr = htons(pCol->lowerRelOptr); + pQueryMsg->colList[i].upperRelOptr = htons(pCol->upperRelOptr); + + pMsg += sizeof(SColumnFilterMsg); + } + + bool hasArithmeticFunction = false; + + pMsg = pBinaryBuf; + SSqlFuncExprMsg *pSqlFuncExpr = (SSqlFuncExprMsg *)pBinaryBuf; + + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + + if (pExpr->sqlFuncId == TSDB_FUNC_ARITHM) { + hasArithmeticFunction = true; + } + + if (!tscValidateColumnId(pCmd, pExpr->colInfo.colId)) { + /* column id is not valid according to the cached metermeta, the meter meta is expired */ + tscError("%p table schema is not matched with parsed sql", pSql); + return -1; + } + + pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId); + pSqlFuncExpr->colInfo.colIdx = htons(pExpr->colInfo.colIdx); + pSqlFuncExpr->colInfo.isTag = 
pExpr->colInfo.isTag; + + pSqlFuncExpr->functionId = htons(pExpr->sqlFuncId); + pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams); + pMsg += sizeof(SSqlFuncExprMsg); + + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { + pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType); + pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen); + + if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { + memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen); + pMsg += pExpr->param[j].nLen + 1; // by plus one char to make the string null-terminated + } else { + pSqlFuncExpr->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64Key); + } + } + + pSqlFuncExpr = (SSqlFuncExprMsg *)pMsg; + } + + int32_t len = 0; + if (hasArithmeticFunction) { + SColumnBase *pColBase = pCmd->colList.pColList; + for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) { + char * name = pSchema[pColBase[i].colIndex].name; + int32_t lenx = strlen(name); + memcpy(pMsg, name, lenx); + *(pMsg + lenx) = ','; + + len += (lenx + 1); // one for comma + pMsg += (lenx + 1); + } + } + + pQueryMsg->colNameLen = htonl(len); + + // set sids list + tscTrace("%p vid:%d, query on %d meters", pSql, pSql->cmd.vnodeIdx, numOfMeters); + if (UTIL_METER_IS_NOMRAL_METER(pCmd)) { +#ifdef _DEBUG_VIEW + tscTrace("%p %d", pSql, pCmd->pMeterMeta->sid); +#endif + SMeterSidExtInfo *pSMeterTagInfo = (SMeterSidExtInfo *)pMsg; + pSMeterTagInfo->sid = htonl(pMeterMeta->sid); + pMsg += sizeof(SMeterSidExtInfo); + } else { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx - 1); + + for (int32_t i = 0; i < numOfMeters; ++i) { + SMeterSidExtInfo *pMeterTagInfo = (SMeterSidExtInfo *)pMsg; + SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i); + + pMeterTagInfo->sid = htonl(pQueryMeterInfo->sid); + pMsg += sizeof(SMeterSidExtInfo); + +#ifdef _DEBUG_VIEW + tscTrace("%p %d", pSql, pQueryMeterInfo->sid); +#endif + + memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen); + pMsg += pMetricMeta->tagLen; + } + } + + /* only include the required tag column schema. 
If a tag is not required, it won't be sent to vnode */ + if (UTIL_METER_IS_METRIC(pCmd) && pCmd->numOfReqTags > 0) { // always transfer tag schema to vnode if exists + SSchema *pTagSchema = tsGetTagSchema(pMeterMeta); + + for (int32_t j = 0; j < pCmd->numOfReqTags; ++j) { + if (pCmd->tagColumnIndex[j] == -1) { + SSchema tbSchema = {.bytes = TSDB_METER_NAME_LEN, .colId = -1, .type = TSDB_DATA_TYPE_BINARY}; + memcpy(pMsg, &tbSchema, sizeof(SSchema)); + } else { + memcpy(pMsg, &pTagSchema[pCmd->tagColumnIndex[j]], sizeof(SSchema)); + } + + pMsg += sizeof(SSchema); + } + } + + SSqlGroupbyExpr *pGroupbyExpr = &pCmd->groupbyExpr; + if (pGroupbyExpr->numOfGroupbyCols != 0) { + assert(pMeterMeta->numOfTags != 0); + + pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIdx); + pQueryMsg->orderType = htons(pGroupbyExpr->orderType); + + for (int32_t j = 0; j < pGroupbyExpr->numOfGroupbyCols; ++j) { + *((int16_t *)pMsg) = pGroupbyExpr->tagIndex[j]; + pMsg += sizeof(pGroupbyExpr->tagIndex[j]); + } + } + + if (pCmd->interpoType != TSDB_INTERPO_NONE) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + *((int64_t *)pMsg) = htobe64(pCmd->defaultVal[i]); + pMsg += sizeof(pCmd->defaultVal[0]); + } + } + + msgLen = pMsg - pStart; + + tscTrace("%p msg built success,len:%d bytes", pSql, msgLen); + pCmd->payloadLen = msgLen; + pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY; + + assert(msgLen + minMsgSize() <= size); + return msgLen; +} + +int tscBuildCreateDbMsg(SSqlObj *pSql) { + SCreateDbMsg *pCreateDbMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pStart = pCmd->payload + tsRpcHeadSize; + pMsg = pStart; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pCreateDbMsg = (SCreateDbMsg *)pMsg; + strcpy(pCreateDbMsg->db, pCmd->name); + + pCreateDbMsg->replications = pCmd->defaultVal[0]; + pCreateDbMsg->daysPerFile = htonl(pCmd->defaultVal[1]); + + pCreateDbMsg->rowsInFileBlock = htonl(pCmd->defaultVal[3]); + pCreateDbMsg->cacheBlockSize = htonl(pCmd->defaultVal[4]); + pCreateDbMsg->blocksPerMeter = htons(pCmd->defaultVal[5]); + pCreateDbMsg->maxSessions = htonl(pCmd->defaultVal[6]); + + pCreateDbMsg->commitTime = htonl(pCmd->defaultVal[7]); + pCreateDbMsg->commitLog = pCmd->defaultVal[8]; + pCreateDbMsg->compression = pCmd->defaultVal[9]; + pCreateDbMsg->cacheNumOfBlocks.fraction = *(double *)&(pCmd->defaultVal[10]); + + int32_t n = pCmd->defaultVal[11]; + switch (n) { + case 1: + pCreateDbMsg->daysToKeep = htonl(pCmd->defaultVal[12]); + break; + case 2: { + pCreateDbMsg->daysToKeep = htonl(pCmd->defaultVal[12]); + pCreateDbMsg->daysToKeep1 = htonl(pCmd->defaultVal[13]); + break; + } + case 3: { + pCreateDbMsg->daysToKeep = htonl(pCmd->defaultVal[12]); + pCreateDbMsg->daysToKeep1 = htonl(pCmd->defaultVal[13]); + pCreateDbMsg->daysToKeep2 = htonl(pCmd->defaultVal[14]); + break; + } + } + pCreateDbMsg->precision = pCmd->defaultVal[15]; + + pMsg += sizeof(SCreateDbMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_DB; + + return msgLen; +} + +int tscBuildCreateDnodeMsg(SSqlObj *pSql) { + SCreateDnodeMsg *pCreate; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pCreate = (SCreateDnodeMsg *)pMsg; + strcpy(pCreate->ip, 
pCmd->name); + + pMsg += sizeof(SCreateDnodeMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_PNODE; + + return msgLen; +} + +int tscBuildDropDnodeMsg(SSqlObj *pSql) { + SDropDnodeMsg *pDrop; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pDrop = (SDropDnodeMsg *)pMsg; + strcpy(pDrop->ip, pCmd->name); + + pMsg += sizeof(SDropDnodeMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_DROP_PNODE; + + return msgLen; +} + +int tscBuildCreateUserMsg(SSqlObj *pSql) { + SCreateUserMsg *pCreateMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pCreateMsg = (SCreateUserMsg *)pMsg; + strcpy(pCreateMsg->user, pCmd->name); + strcpy(pCreateMsg->pass, pCmd->payload); + + pMsg += sizeof(SCreateUserMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_USER; + + return msgLen; +} + +static int tscBuildAcctMsgImpl(SSqlObj *pSql) { + SCreateAcctMsg *pAlterMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pAlterMsg = (SCreateAcctMsg *)pMsg; + strcpy(pAlterMsg->user, pCmd->name); + strcpy(pAlterMsg->pass, pCmd->payload); + + pMsg += sizeof(SCreateAcctMsg); + + pAlterMsg->cfg.maxUsers = htonl((int32_t)pCmd->defaultVal[0]); + pAlterMsg->cfg.maxDbs = htonl((int32_t)pCmd->defaultVal[1]); + pAlterMsg->cfg.maxTimeSeries = htonl((int32_t)pCmd->defaultVal[2]); + pAlterMsg->cfg.maxStreams = htonl((int32_t)pCmd->defaultVal[3]); + pAlterMsg->cfg.maxPointsPerSecond = htonl((int32_t)pCmd->defaultVal[4]); + pAlterMsg->cfg.maxStorage = htobe64(pCmd->defaultVal[5]); + pAlterMsg->cfg.maxQueryTime = htobe64(pCmd->defaultVal[6]); + pAlterMsg->cfg.maxConnections = htonl((int32_t)pCmd->defaultVal[7]); + pAlterMsg->cfg.accessState = (int8_t)pCmd->defaultVal[8]; + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + + return msgLen; +} + +int tscBuildCreateAcctMsg(SSqlObj *pSql) { + int msgLen = tscBuildAcctMsgImpl(pSql); + pSql->cmd.msgType = TSDB_MSG_TYPE_CREATE_ACCT; + return msgLen; +} + +int tscBuildAlterAcctMsg(SSqlObj *pSql) { + int msgLen = tscBuildAcctMsgImpl(pSql); + pSql->cmd.msgType = TSDB_MSG_TYPE_ALTER_ACCT; + return msgLen; +} + +int tscBuildAlterUserMsg(SSqlObj *pSql) { + SAlterUserMsg *pAlterMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pAlterMsg = (SCreateUserMsg *)pMsg; + strcpy(pAlterMsg->user, pCmd->name); + strcpy(pAlterMsg->pass, pCmd->payload); + pAlterMsg->flag = pCmd->order.order; + pAlterMsg->privilege = (char)pCmd->count; + + pMsg += sizeof(SAlterUserMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_ALTER_USER; + + return msgLen; +} + 
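+/*
+ * Note: the mgmt message builders in this file all follow the same shape; a minimal sketch
+ * for orientation only (not part of the wire protocol definition):
+ *
+ *   char *pStart = pCmd->payload + tsRpcHeadSize;   // skip the rpc head
+ *   SMgmtHead *pMgmt = (SMgmtHead *)pStart;
+ *   strcpy(pMgmt->db, pObj->db);                    // db carried to the mgmt node
+ *   char *pMsg = pStart + sizeof(SMgmtHead);
+ *   ...fill in the command-specific struct at pMsg and advance it...
+ *   pCmd->payloadLen = pMsg - pStart;               // total bytes written
+ *   pCmd->msgType = TSDB_MSG_TYPE_...;              // command type of this request
+ */
+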
+int tscBuildCfgDnodeMsg(SSqlObj *pSql) { + SCfgMsg *pCfg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pCfg = (SCfgMsg *)pMsg; + strcpy(pCfg->ip, pCmd->name); + strcpy(pCfg->config, pCmd->payload); + + pMsg += sizeof(SCfgMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CFG_PNODE; + + return msgLen; +} + +int tscBuildDropDbMsg(SSqlObj *pSql) { + SDropDbMsg *pDropDbMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pDropDbMsg = (SDropDbMsg *)pMsg; + strcpy(pDropDbMsg->db, pCmd->name); + + pDropDbMsg->ignoreNotExists = htons(pCmd->existsCheck ? 1 : 0); + + pMsg += sizeof(SDropDbMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_DROP_DB; + + return msgLen; +} + +int tscBuildDropUserMsg(SSqlObj *pSql) { + SDropUserMsg *pDropMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pDropMsg = (SDropUserMsg *)pMsg; + strcpy(pDropMsg->user, pCmd->name); + + pMsg += sizeof(SDropUserMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_DROP_USER; + + return msgLen; +} + +int tscBuildDropAcctMsg(SSqlObj *pSql) { + SDropAcctMsg *pDropMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pDropMsg = (SDropAcctMsg *)pMsg; + strcpy(pDropMsg->user, pCmd->name); + + pMsg += sizeof(SDropAcctMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_DROP_ACCT; + + return msgLen; +} + +int tscBuildUseDbMsg(SSqlObj *pSql) { + SUseDbMsg *pUseDbMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pUseDbMsg = (SUseDbMsg *)pMsg; + strcpy(pUseDbMsg->db, pCmd->name); + + pMsg += sizeof(SUseDbMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_USE_DB; + + return msgLen; +} + +int tscBuildShowMsg(SSqlObj *pSql) { + SShowMsg *pShowMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + + assert(pCmd->payloadLen < TSDB_SQLCMD_SIZE); + char payload[TSDB_SQLCMD_SIZE] = {0}; + memcpy(payload, pCmd->payload, pCmd->payloadLen); + + int32_t size = minMsgSize() + sizeof(SMgmtHead) + sizeof(SShowTableMsg) + pCmd->payloadLen + TSDB_EXTRA_PAYLOAD_SIZE; + tscAllocPayloadWithSize(pCmd, size); + + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + if (pCmd->tagCond.len > 0) { + strcpy(pMgmt->db, pCmd->tagCond.pData); + } else { + 
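// nothing is carried in tagCond, so fall back to the db of the current connection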
strcpy(pMgmt->db, pObj->db); + } + + pMsg += sizeof(SMgmtHead); + + pShowMsg = (SShowMsg *)pMsg; + pShowMsg->type = pCmd->type; + + if ((pShowMsg->type == TSDB_MGMT_TABLE_TABLE || pShowMsg->type == TSDB_MGMT_TABLE_METRIC) && + pCmd->payloadLen != 0) { + // only show tables support wildcard query + pShowMsg->payloadLen = htons(pCmd->payloadLen); + memcpy(pShowMsg->payload, payload, pCmd->payloadLen); + } + + pMsg += (sizeof(SShowTableMsg) + pCmd->payloadLen); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_SHOW; + + assert(msgLen + minMsgSize() <= size); + return msgLen; +} + +int tscBuildKillQueryMsg(SSqlObj *pSql) { + SKillQuery *pKill; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pKill = (SKillQuery *)pMsg; + pKill->handle = 0; + strcpy(pKill->queryId, pCmd->payload); + + pMsg += sizeof(SKillQuery); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_KILL_QUERY; + + return msgLen; +} + +int tscBuildKillStreamMsg(SSqlObj *pSql) { + SKillStream *pKill; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pKill = (SKillStream *)pMsg; + pKill->handle = 0; + strcpy(pKill->queryId, pCmd->payload); + + pMsg += sizeof(SKillStream); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_KILL_STREAM; + + return msgLen; +} + +int tscBuildKillConnectionMsg(SSqlObj *pSql) { + SKillConnection *pKill; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pKill = (SKillStream *)pMsg; + pKill->handle = 0; + strcpy(pKill->queryId, pCmd->payload); + + pMsg += sizeof(SKillStream); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_KILL_CONNECTION; + + return msgLen; +} + +int tscEstimateCreateTableMsgLength(SSqlObj *pSql) { + SSqlCmd *pCmd = &(pSql->cmd); + + int32_t size = minMsgSize() + sizeof(SMgmtHead) + sizeof(SCreateTableMsg); + + if (pCmd->numOfCols == 0 && pCmd->count == 0) { + size += sizeof(STagData); + } else { + size += sizeof(SSchema) * (pCmd->numOfCols + pCmd->count); + } + + if (strlen(pCmd->payload) > 0) size += strlen(pCmd->payload) + 1; + + return size + TSDB_EXTRA_PAYLOAD_SIZE; +} + +int tscBuildCreateTableMsg(SSqlObj *pSql) { + SCreateTableMsg *pCreateTableMsg; + char * pMsg, *pStart; + int msgLen = 0; + SSchema * pSchema; + int size = 0; + + // tmp variable to + // 1. save tags data in order to avoid too long tag values overlapped by header + // 2. save the selection clause, in create table as .. 
sql string + char *tmpData = calloc(1, pSql->cmd.allocSize); + + // STagData is in binary format, strncpy is not available + memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize); + + SSqlCmd *pCmd = &pSql->cmd; + + // Reallocate the payload size + size = tscEstimateCreateTableMsgLength(pSql); + tscAllocPayloadWithSize(pCmd, size); + + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + // use dbinfo from meterid without modifying current db info + tscGetDBInfoFromMeterId(pSql->cmd.name, pMgmt->db); + + pMsg += sizeof(SMgmtHead); + + pCreateTableMsg = (SCreateTableMsg *)pMsg; + strcpy(pCreateTableMsg->meterId, pCmd->name); + + pCreateTableMsg->igExists = pCmd->existsCheck ? 1 : 0; + pCreateTableMsg->numOfColumns = htons(pCmd->numOfCols); + pCreateTableMsg->numOfTags = htons(pCmd->count); + pMsg = (char *)pCreateTableMsg->schema; + + pCreateTableMsg->sqlLen = 0; + short sqlLen = (short)(strlen(tmpData) + 1); + + if (pCmd->numOfCols == 0 && pCmd->count == 0) { + // create by using metric, tags value + memcpy(pMsg, tmpData, sizeof(STagData)); + pMsg += sizeof(STagData); + } else { + // create metric/create normal meter + pSchema = pCreateTableMsg->schema; + for (int i = 0; i < pCmd->numOfCols + pCmd->count; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + pSchema->type = pField->type; + strcpy(pSchema->name, pField->name); + pSchema->bytes = htons(pField->bytes); + pSchema++; + } + + pMsg = (char *)pSchema; + + // check if it is a stream sql + if (sqlLen > 1) { + memcpy(pMsg, tmpData, sqlLen); + pMsg[sqlLen - 1] = 0; + + pCreateTableMsg->sqlLen = htons(sqlLen); + pMsg += sqlLen; + } + } + + tfree(tmpData); + tscClearFieldInfo(pCmd); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CREATE_TABLE; + + assert(msgLen + minMsgSize() <= size); + return msgLen; +} + +int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) { + return minMsgSize() + sizeof(SMgmtHead) + sizeof(SAlterTableMsg) + sizeof(SSchema) * pCmd->numOfCols + + TSDB_EXTRA_PAYLOAD_SIZE; +} + +int tscBuildAlterTableMsg(SSqlObj *pSql) { + SAlterTableMsg *pAlterTableMsg; + char * pMsg, *pStart; + int msgLen = 0; + int size = 0; + + SSqlCmd *pCmd = &pSql->cmd; + + char buf[TSDB_MAX_TAGS_LEN] = {0}; + int32_t len = (TSDB_MAX_TAGS_LEN < pCmd->allocSize)? 
TSDB_MAX_TAGS_LEN:pCmd->allocSize; + memcpy(buf, pCmd->payload, len); + + size = tscEstimateAlterTableMsgLength(pCmd); + tscAllocPayloadWithSize(pCmd, size); + + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + tscGetDBInfoFromMeterId(pCmd->name, pMgmt->db); + pMsg += sizeof(SMgmtHead); + + pAlterTableMsg = (SAlterTableMsg *)pMsg; + strcpy(pAlterTableMsg->meterId, pCmd->name); + pAlterTableMsg->type = htons(pCmd->count); + pAlterTableMsg->numOfCols = htons(pCmd->numOfCols); + memcpy(pAlterTableMsg->tagVal, buf, TSDB_MAX_TAGS_LEN); + + SSchema *pSchema = pAlterTableMsg->schema; + for (int i = 0; i < pCmd->numOfCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + pSchema->type = pField->type; + strcpy(pSchema->name, pField->name); + pSchema->bytes = htons(pField->bytes); + pSchema++; + } + + pMsg = (char *)pSchema; + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_ALTER_TABLE; + + assert(msgLen + minMsgSize() <= size); + return msgLen; +} + +int tscAlterDbMsg(SSqlObj *pSql) { + SAlterDbMsg *pAlterDbMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pStart = pCmd->payload + tsRpcHeadSize; + pMsg = pStart; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pAlterDbMsg = (SAlterDbMsg *)pMsg; + strcpy(pAlterDbMsg->db, pCmd->name); + + pAlterDbMsg->replications = pCmd->defaultVal[0]; + pAlterDbMsg->daysPerFile = htonl(pCmd->defaultVal[1]); + pAlterDbMsg->daysToKeep = htonl(pCmd->defaultVal[2]); + + pMsg += sizeof(SAlterDbMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_ALTER_DB; + + return msgLen; +} + +int tscBuildDropTableMsg(SSqlObj *pSql) { + SDropTableMsg *pDropTableMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + tscGetDBInfoFromMeterId(pCmd->name, pMgmt->db); + pMsg += sizeof(SMgmtHead); + + pDropTableMsg = (SDropTableMsg *)pMsg; + strcpy(pDropTableMsg->meterId, pCmd->name); + + pDropTableMsg->igNotExists = pCmd->existsCheck ? 
1 : 0; + pMsg += sizeof(SDropTableMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_DROP_TABLE; + + return msgLen; +} + +int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql) { + char *pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + if (pCmd->tagCond.len > 0) { + strcpy(pMgmt->db, pCmd->tagCond.pData); + } else { + strcpy(pMgmt->db, pObj->db); + } + pMsg += sizeof(SMgmtHead); + + *((uint64_t *)pMsg) = pSql->res.qhandle; + pMsg += 8; + *pMsg = pCmd->type; + pMsg += 1; + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_RETRIEVE; + + return msgLen; +} + +static int tscSetResultPointer(SSqlCmd *pCmd, SSqlRes *pRes) { + if (tscCreateResPointerInfo(pCmd, pRes) != TSDB_CODE_SUCCESS) { + return pRes->code; + } + + for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + int16_t offset = tscFieldInfoGetOffset(pCmd, i); + + pRes->bytes[i] = pField->bytes; + if (pCmd->order.order == TSQL_SO_DESC) { + pRes->bytes[i] = -pRes->bytes[i]; + pRes->tsrow[i] = ((pRes->data + offset * pRes->numOfRows) + (pRes->numOfRows - 1) * pField->bytes); + } else { + pRes->tsrow[i] = (pRes->data + offset * pRes->numOfRows); + } + } + + return 0; +} + +/* + * this function can only be called once. + * by using pRes->rspType to denote its status + * + * if pRes->rspType is 1, no more result + */ +static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) { + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + pRes->code = TSDB_CODE_SUCCESS; + + if (pRes->rspType == 0) { + pRes->numOfRows = numOfRes; + pRes->row = 0; + pRes->rspType = 1; + + tscSetResultPointer(pCmd, pRes); + pRes->row = 0; + + } else { + pRes->numOfRows = 0; + pRes->row = 0; + } + + uint8_t code = pSql->res.code; + if (pSql->fp) { + if (code == TSDB_CODE_SUCCESS) { + (*pSql->fp)(pSql->param, pSql, pSql->res.numOfRows); + } else { + tscQueueAsyncRes(pSql); + } + } + + return code; +} + +int tscProcessDescribeTableRsp(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + int32_t numOfRes = pCmd->pMeterMeta->numOfColumns + pCmd->pMeterMeta->numOfTags; + + return tscLocalResultCommonBuilder(pSql, numOfRes); +} + +int tscProcessTagRetrieveRsp(SSqlObj *pSql) { + SSqlCmd *pCmd = &pSql->cmd; + int32_t numOfRes = 0; + if (tscSqlExprGet(pCmd, 0)->sqlFuncId == TSDB_FUNC_TAGPRJ) { + numOfRes = pCmd->pMetricMeta->numOfMeters; + } else { + numOfRes = 1; // for count function, there is only one output. 
+ } + return tscLocalResultCommonBuilder(pSql, numOfRes); +} + +int tscProcessRetrieveMetricRsp(SSqlObj *pSql) { + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + pRes->code = tscLocalDoReduce(pSql); + + if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) { + tscSetResultPointer(pCmd, pRes); + } + + pRes->row = 0; + + uint8_t code = pSql->res.code; + if (pSql->fp) { // async retrieve metric data + if (pSql->res.code == TSDB_CODE_SUCCESS) { + (*pSql->fp)(pSql->param, pSql, pSql->res.numOfRows); + } else { + tscQueueAsyncRes(pSql); + } + } + + return code; +} + +int tscProcessEmptyResultRsp(SSqlObj *pSql) { + return tscLocalResultCommonBuilder(pSql, 0); +} + +int tscBuildConnectMsg(SSqlObj *pSql) { + SConnectMsg *pConnect; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + pConnect = (SConnectMsg *)pMsg; + + char *db; // ugly code to move the space + db = strstr(pObj->db, TS_PATH_DELIMITER); + db = (db == NULL) ? pObj->db : db + 1; + strcpy(pConnect->db, db); + + pMsg += sizeof(SConnectMsg); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_CONNECT; + + return msgLen; +} + +int tscBuildMeterMetaMsg(SSqlObj *pSql) { + SMeterInfoMsg *pInfoMsg; + char * pMsg, *pStart; + int msgLen = 0; + + char *tmpData = 0; + if (pSql->cmd.allocSize > 0) { + tmpData = calloc(1, pSql->cmd.allocSize); + // STagData is in binary format, strncpy is not available + memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize); + } + + SSqlCmd *pCmd = &pSql->cmd; + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + tscGetDBInfoFromMeterId(pCmd->name, pMgmt->db); + + pMsg += sizeof(SMgmtHead); + + pInfoMsg = (SMeterInfoMsg *)pMsg; + strcpy(pInfoMsg->meterId, pCmd->name); + pInfoMsg->createFlag = htons((uint16_t)pCmd->defaultVal[0]); + pMsg += sizeof(SMeterInfoMsg); + + if (pCmd->defaultVal[0] != 0) { + memcpy(pInfoMsg->tags, tmpData, sizeof(STagData)); + pMsg += sizeof(STagData); + } + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_METERINFO; + + tfree(tmpData); + + assert(msgLen + minMsgSize() <= pCmd->allocSize); + return msgLen; +} + +static int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) { + const int32_t defaultSize = + minMsgSize() + sizeof(SMetricMetaMsg) + sizeof(SMgmtHead) + sizeof(int16_t) * TSDB_MAX_TAGS; + + int32_t tagLen = pCmd->tagCond.len * TSDB_NCHAR_SIZE; + if (tagLen + defaultSize > TSDB_DEFAULT_PAYLOAD_SIZE) { + return tagLen + defaultSize; + } else { + return TSDB_DEFAULT_PAYLOAD_SIZE; + } +} + +int tscBuildMetricMetaMsg(SSqlObj *pSql) { + SMetricMetaMsg *pMetaMsg; + char * pMsg, *pStart; + int msgLen = 0; + + SSqlCmd *pCmd = &pSql->cmd; + + int32_t size = tscEstimateMetricMetaMsgSize(pCmd); + tscAllocPayloadWithSize(pCmd, size); + + pStart = pCmd->payload + tsRpcHeadSize; + pMsg = pStart; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + tscGetDBInfoFromMeterId(pCmd->name, pMgmt->db); + + pMsg += sizeof(SMgmtHead); + + pMetaMsg = (SMetricMetaMsg *)pMsg; + strcpy(pMetaMsg->meterId, pCmd->name); + + pMetaMsg->type = htons(pCmd->tagCond.type); + pMetaMsg->condLength = htonl(pCmd->tagCond.len); + + if (pCmd->tagCond.len > 0) { + /* convert to unicode before sending to mnode for metric query */ + bool ret = taosMbsToUcs4(tsGetMetricQueryCondPos(&pCmd->tagCond), pCmd->tagCond.len, (char *)pMetaMsg->tags, + pCmd->tagCond.len * TSDB_NCHAR_SIZE); + if 
(!ret) { + tscError("%p mbs to ucs4 failed:%s", pSql, tsGetMetricQueryCondPos(&pCmd->tagCond)); + return 0; + } + } + + pMsg += sizeof(SMetricMetaMsg); + pMsg += pCmd->tagCond.len * TSDB_NCHAR_SIZE; + + SSqlGroupbyExpr *pGroupby = &pCmd->groupbyExpr; + + pMetaMsg->limit = htobe64(pCmd->glimit.limit); + pMetaMsg->offset = htobe64(pCmd->glimit.offset); + pMetaMsg->numOfTags = htons(pCmd->numOfReqTags); + pMetaMsg->numOfGroupbyCols = htons(pGroupby->numOfGroupbyCols); + + for (int32_t j = 0; j < pCmd->numOfReqTags; ++j) { + pMetaMsg->tagCols[j] = htons(pCmd->tagColumnIndex[j]); + } + + if (pGroupby->numOfGroupbyCols != 0) { + pMetaMsg->orderIndex = htons(pGroupby->orderIdx); + pMetaMsg->orderType = htons(pGroupby->orderType); + + for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupbyCols; ++j) { + *((int16_t *)pMsg) = htons(pGroupby->tagIndex[j]); + pMsg += sizeof(pCmd->groupbyExpr.tagIndex[j]); + } + } + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_METRIC_META; + + assert(msgLen + minMsgSize() <= size); + return msgLen; +} + +int tscEstimateBuildHeartBeatMsgLength(SSqlObj *pSql) { + int size = 0; + STscObj *pObj = pSql->pTscObj; + + size += tsRpcHeadSize + sizeof(SMgmtHead); + size += sizeof(SQList); + + SSqlObj *tpSql = pObj->sqlList; + while (tpSql) { + size += sizeof(SQDesc); + tpSql = tpSql->next; + } + + size += sizeof(SSList); + SSqlStream *pStream = pObj->streamList; + while (pStream) { + size += sizeof(SSDesc); + pStream = pStream->next; + } + + return size + TSDB_EXTRA_PAYLOAD_SIZE; +} + +int tscBuildHeartBeatMsg(SSqlObj *pSql) { + char *pMsg, *pStart; + int msgLen = 0; + int size = 0; + + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + + pthread_mutex_lock(&pObj->mutex); + + size = tscEstimateBuildHeartBeatMsgLength(pSql); + tscAllocPayloadWithSize(pCmd, size); + + pMsg = pCmd->payload + tsRpcHeadSize; + pStart = pMsg; + + SMgmtHead *pMgmt = (SMgmtHead *)pMsg; + strcpy(pMgmt->db, pObj->db); + pMsg += sizeof(SMgmtHead); + + pMsg = tscBuildQueryStreamDesc(pMsg, pObj); + pthread_mutex_unlock(&pObj->mutex); + + msgLen = pMsg - pStart; + pCmd->payloadLen = msgLen; + pCmd->msgType = TSDB_MSG_TYPE_HEARTBEAT; + + assert(msgLen + minMsgSize() <= size); + return msgLen; +} + +int tscProcessRetrieveRspFromMgmt(SSqlObj *pSql) { + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + STscObj *pObj = pSql->pTscObj; + + SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)(pRes->pRsp); + pRes->numOfRows = htonl(pRetrieve->numOfRows); + pRes->precision = htons(pRes->precision); + + pRes->data = pRetrieve->data; + + tscSetResultPointer(pCmd, pRes); + + if (pRes->numOfRows == 0) { + taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); + pSql->thandle = NULL; + } + + pRes->row = 0; + return 0; +} + +int tscProcessMeterMetaRsp(SSqlObj *pSql) { + SMeterMeta *pMeta; + SSchema * pSchema; + uint8_t ieType; + + char *rsp = pSql->res.pRsp; + + ieType = *rsp; + if (ieType != TSDB_IE_TYPE_META) { + tscError("invalid ie type:%d", ieType); + return TSDB_CODE_INVALID_IE; + } + + rsp++; + pMeta = (SMeterMeta *)rsp; + + pMeta->sid = htonl(pMeta->sid); + pMeta->sversion = htonl(pMeta->sversion); + pMeta->vgid = htonl(pMeta->vgid); + pMeta->uid = htobe64(pMeta->uid); + + if (pMeta->sid < 0 || pMeta->vgid < 0) { + tscError("invalid meter vgid:%d, sid%d", pMeta->vgid, pMeta->sid); + return TSDB_CODE_INVALID_VALUE; + } + + pMeta->numOfColumns = htons(pMeta->numOfColumns); + pMeta->numOfTags = htons(pMeta->numOfTags); + 
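// the remaining fields also arrive in network byte order; they are converted and validated below before the schema is parsed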
pMeta->precision = htons(pMeta->precision); + pMeta->meterType = htons(pMeta->meterType); + + if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) { + tscError("invalid tag value count:%d", pMeta->numOfTags); + return TSDB_CODE_INVALID_VALUE; + } + + if (pMeta->numOfTags > TSDB_MAX_TAGS || pMeta->numOfTags < 0) { + tscError("invalid numOfTags:%d", pMeta->numOfTags); + return TSDB_CODE_INVALID_VALUE; + } + + if (pMeta->numOfColumns > TSDB_MAX_COLUMNS || pMeta->numOfColumns < 0) { + tscError("invalid numOfColumns:%d", pMeta->numOfColumns); + return TSDB_CODE_INVALID_VALUE; + } + + for (int i = 0; i < TSDB_VNODES_SUPPORT; ++i) { + pMeta->vpeerDesc[i].vnode = htonl(pMeta->vpeerDesc[i].vnode); + } + + pMeta->rowSize = 0; + rsp += sizeof(SMeterMeta); + pSchema = (SSchema *)rsp; + + int32_t numOfTotalCols = pMeta->numOfColumns + pMeta->numOfTags; + for (int i = 0; i < numOfTotalCols; ++i) { + pSchema->bytes = htons(pSchema->bytes); + pSchema->colId = htons(pSchema->colId); + + // ignore the tags length + if (i < pMeta->numOfColumns) { + pMeta->rowSize += pSchema->bytes; + } + pSchema++; + } + + rsp += numOfTotalCols * sizeof(SSchema); + + int32_t tagLen = 0; + SSchema *pTagsSchema = tsGetSchemaColIdx(pMeta, pMeta->numOfColumns); + + if (pMeta->meterType == TSDB_METER_MTABLE) { + for (int32_t i = 0; i < pMeta->numOfTags; ++i) { + tagLen += pTagsSchema[i].bytes; + } + pMeta->tags = sizeof(SMeterMeta) + numOfTotalCols * sizeof(SSchema); + } + + rsp += tagLen; + int32_t size = (int32_t)(rsp - (char *)pMeta); + + // pMeta->index = rand() % TSDB_VNODES_SUPPORT; + pMeta->index = 0; + + // todo add one more function: taosAddDataIfNotExists(); + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMeterMeta), false); + + pSql->cmd.pMeterMeta = + (SMeterMeta *)taosAddDataIntoCache(tscCacheHandle, pSql->cmd.name, (char *)pMeta, size, tsMeterMetaKeepTimer); + if (pSql->cmd.pMeterMeta == NULL) return 0; + + return TSDB_CODE_OTHERS; +} + +int tscProcessMetricMetaRsp(SSqlObj *pSql) { + SMetricMeta *pMeta; + uint8_t ieType; + char * rsp = pSql->res.pRsp; + + ieType = *rsp; + if (ieType != TSDB_IE_TYPE_META) { + tscError("invalid ie type:%d", ieType); + return TSDB_CODE_INVALID_IE; + } + + rsp++; + pMeta = (SMetricMeta *)rsp; + size_t size = (size_t)pSql->res.rspLen - 1; + rsp = rsp + sizeof(SMetricMeta); + + pMeta->numOfMeters = htonl(pMeta->numOfMeters); + pMeta->numOfVnodes = htonl(pMeta->numOfVnodes); + pMeta->tagLen = htons(pMeta->tagLen); + + size += pMeta->numOfVnodes * sizeof(SVnodeSidList *) + pMeta->numOfMeters * sizeof(SMeterSidExtInfo *); + + char *pStr = calloc(1, size); + + SMetricMeta *pNewMetricMeta = (SMetricMeta *)pStr; + pNewMetricMeta->numOfMeters = pMeta->numOfMeters; + pNewMetricMeta->numOfVnodes = pMeta->numOfVnodes; + pNewMetricMeta->tagLen = pMeta->tagLen; + + pStr = pStr + sizeof(SMetricMeta) + pNewMetricMeta->numOfVnodes * sizeof(SVnodeSidList *); + + for (int32_t i = 0; i < pMeta->numOfVnodes; ++i) { + SVnodeSidList *pSidLists = (SVnodeSidList *)rsp; + memcpy(pStr, pSidLists, sizeof(SVnodeSidList)); + + pNewMetricMeta->list[i] = pStr - (char *)pNewMetricMeta; // offset value + SVnodeSidList *pLists = (SVnodeSidList *)pStr; + + tscTrace("%p metricmeta:vid:%d,numOfMeters:%d", pSql, i, pLists->numOfSids); + + pStr += sizeof(SVnodeSidList) + sizeof(SMeterSidExtInfo *) * pSidLists->numOfSids; + rsp += sizeof(SVnodeSidList); + + size_t sidSize = sizeof(SMeterSidExtInfo) + pNewMetricMeta->tagLen; + for (int32_t j = 0; j < pSidLists->numOfSids; ++j) { + 
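// copy each SMeterSidExtInfo together with its tag data, recording its offset relative to the vnode list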
pLists->pSidExtInfoList[j] = pStr - (char *)pLists; + memcpy(pStr, rsp, sidSize); + + rsp += sidSize; + pStr += sidSize; + } + } + + char name[TSDB_MAX_TAGS_LEN + 1] = {0}; + tscGetMetricMetaCacheKey(&pSql->cmd, name); + + /* release the used metricmeta */ + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMetricMeta), false); + + pSql->cmd.pMetricMeta = + (SMetricMeta *)taosAddDataIntoCache(tscCacheHandle, name, (char *)pNewMetricMeta, size, tsMetricMetaKeepTimer); + tfree(pNewMetricMeta); + + if (pSql->cmd.pMetricMeta == NULL) { + return 0; + } + + return TSDB_CODE_OTHERS; +} + +/* + * current process do not use the cache at all + */ +int tscProcessShowRsp(SSqlObj *pSql) { + SMeterMeta * pMeta; + SShowRspMsg *pShow; + SSchema * pSchema; + char key[20]; + + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + pShow = (SShowRspMsg *)pRes->pRsp; + pRes->qhandle = pShow->qhandle; + + pRes->numOfRows = 0; + pRes->row = 0; + pMeta = &(pShow->meterMeta); + + pMeta->numOfColumns = ntohs(pMeta->numOfColumns); + + pSchema = (SSchema *)((char *)pMeta + sizeof(SMeterMeta)); + pMeta->sid = ntohs(pMeta->sid); + for (int i = 0; i < pMeta->numOfColumns; ++i) { + pSchema->bytes = htons(pSchema->bytes); + pSchema++; + } + + key[0] = pCmd->type + 'a'; + strcpy(key + 1, "showlist"); + + taosRemoveDataFromCache(tscCacheHandle, (void *)&(pCmd->pMeterMeta), false); + + int32_t size = pMeta->numOfColumns * sizeof(SSchema) + sizeof(SMeterMeta); + pCmd->pMeterMeta = (SMeterMeta *)taosAddDataIntoCache(tscCacheHandle, key, (char *)pMeta, size, tsMeterMetaKeepTimer); + pCmd->numOfCols = pCmd->fieldsInfo.numOfOutputCols; + SSchema *pMeterSchema = tsGetSchema(pCmd->pMeterMeta); + + tscColumnInfoReserve(pCmd, pMeta->numOfColumns); + for (int i = 0; i < pMeta->numOfColumns; ++i) { + tscColumnInfoInsert(pCmd, i); + tscFieldInfoSetValFromSchema(&pCmd->fieldsInfo, i, &pMeterSchema[i]); + } + + tscFieldInfoCalOffset(pCmd); + return 0; +} + +int tscProcessConnectRsp(SSqlObj *pSql) { + char temp[TSDB_METER_ID_LEN]; + SConnectRsp *pConnect; + + STscObj *pObj = pSql->pTscObj; + SSqlRes *pRes = &pSql->res; + + pConnect = (SConnectRsp *)pRes->pRsp; + strcpy(pObj->acctId, pConnect->acctId); // copy acctId from response + sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db); + strcpy(pObj->db, temp); + + strcpy(pObj->sversion, pConnect->version); + pObj->writeAuth = pConnect->writeAuth; + pObj->superAuth = pConnect->superAuth; + taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer); + + return 0; +} + +int tscProcessUseDbRsp(SSqlObj *pSql) { + STscObj *pObj = pSql->pTscObj; + strcpy(pObj->db, pSql->cmd.name); + return 0; +} + +int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) { + taosClearDataCache(tscCacheHandle); + return 0; +} + +int tscProcessDropTableRsp(SSqlObj *pSql) { + SMeterMeta *pMeterMeta = taosGetDataFromCache(tscCacheHandle, pSql->cmd.name); + if (pMeterMeta == NULL) { + /* not in cache, abort */ + return 0; + } + + /* + * 1. if a user drops one table, which is the only table in a vnode, remove operation will incur vnode to be removed. + * 2. Then, a user creates a new metric followed by a table with identical name of removed table but different schema, + * here the table will reside in a new vnode. + * The cached information is expired, however, we may have lost the ref of original meter. So, clear whole cache instead. 
+ */ + tscTrace("%p force release metermeta after drop table:%s", pSql, pSql->cmd.name); + taosRemoveDataFromCache(tscCacheHandle, (void **)&pMeterMeta, true); + + if (pSql->cmd.pMeterMeta) { + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMeterMeta), true); + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMetricMeta), true); + } + + return 0; +} + +int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { + SMeterMeta *pMeterMeta = taosGetDataFromCache(tscCacheHandle, pSql->cmd.name); + if (pMeterMeta == NULL) { /* not in cache, abort */ + return 0; + } + + tscTrace("%p force release metermeta in cache after alter-table: %s", pSql, pSql->cmd.name); + taosRemoveDataFromCache(tscCacheHandle, (void **)&pMeterMeta, true); + + if (pSql->cmd.pMeterMeta) { + bool isMetric = UTIL_METER_IS_METRIC(&pSql->cmd); + + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMeterMeta), true); + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMetricMeta), true); + + if (isMetric) { + // here, the pCmd->pMeterMeta == NULL + // if it is a metric, reset whole query cache + tscTrace("%p reset query cache since table:%s is metric", pSql, pSql->cmd.name); + taosClearDataCache(tscCacheHandle); + } + } + + return 0; +} + +int tscProcessAlterDbMsgRsp(SSqlObj *pSql) { + UNUSED(pSql); + return 0; +} + +int tscProcessQueryRsp(SSqlObj *pSql) { + SSqlRes *pRes = &pSql->res; + + pRes->qhandle = *((uint64_t *)pRes->pRsp); + pRes->numOfRows = 0; + pRes->row = 0; + pRes->data = NULL; + return 0; +} + +int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { + SSqlRes * pRes = &pSql->res; + SSqlCmd * pCmd = &pSql->cmd; + STscObj * pObj = pSql->pTscObj; + SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)pRes->pRsp; + + pRes->numOfRows = htonl(pRetrieve->numOfRows); + pRes->precision = htons(pRetrieve->precision); + pRes->offset = htobe64(pRetrieve->offset); + + pRes->data = pRetrieve->data; + pRes->useconds = pRetrieve->useconds; + + tscSetResultPointer(pCmd, pRes); + pRes->row = 0; + + if (pRes->numOfRows == 0 && !(tscProjectionQueryOnMetric(pSql) && pRes->offset > 0)) { + taosAddConnIntoCache(tscConnCache, pSql->thandle, pSql->ip, pSql->vnode, pObj->user); + pSql->thandle = NULL; + } else { + tscTrace("%p numOfRows:%d, offset:%d, not recycle connection", pSql, pRes->numOfRows, pRes->offset); + } + + return 0; +} + +int tscProcessRetrieveRspFromLocal(SSqlObj *pSql) { + SSqlRes * pRes = &pSql->res; + SSqlCmd * pCmd = &pSql->cmd; + SRetrieveMeterRsp *pRetrieve = (SRetrieveMeterRsp *)pRes->pRsp; + + pRes->numOfRows = htonl(pRetrieve->numOfRows); + pRes->data = pRetrieve->data; + + tscSetResultPointer(pCmd, pRes); + pRes->row = 0; + return 0; +} + +void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code); + +static int32_t tscDoGetMeterMeta(SSqlObj *pSql, char *meterId) { + int32_t code = TSDB_CODE_SUCCESS; + + SSqlObj *pNew = malloc(sizeof(SSqlObj)); + memset(pNew, 0, sizeof(SSqlObj)); + pNew->pTscObj = pSql->pTscObj; + pNew->signature = pNew; + pNew->cmd.command = TSDB_SQL_META; + pNew->cmd.payload = NULL; + pNew->cmd.allocSize = 0; + pNew->cmd.defaultVal[0] = pSql->cmd.defaultVal[0]; // flag of create table if not exists + tscAllocPayloadWithSize(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); + + strcpy(pNew->cmd.name, meterId); + memcpy(pNew->cmd.payload, pSql->cmd.payload, TSDB_DEFAULT_PAYLOAD_SIZE); + tscTrace("%p new pSqlObj:%p to get meterMeta", pSql, pNew); + + if (pSql->fp == NULL) { + sem_init(&pNew->rspSem, 0, 0); + sem_init(&pNew->emptyRspSem, 0, 1); + + code = 
tscProcessSql(pNew); + if (code == TSDB_CODE_SUCCESS) { + /* update cache only on success get metermeta */ + assert(pSql->cmd.pMeterMeta == NULL); + pSql->cmd.pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, meterId); + } + + tscTrace("%p get meter meta complete, code:%d, pMeterMeta:%p", pSql, code, pSql->cmd.pMeterMeta); + tscFreeSqlObj(pNew); + + } else { + pNew->fp = tscMeterMetaCallBack; + pNew->param = pSql; + + int32_t len = strlen(pSql->sqlstr); + pNew->sqlstr = malloc(len + 1); + strcpy(pNew->sqlstr, pSql->sqlstr); + pNew->sqlstr[len] = 0; + + code = tscProcessSql(pNew); + if (code == TSDB_CODE_SUCCESS) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + } + } + + return code; +} + +int tscGetMeterMeta(SSqlObj *pSql, char *meterId) { + SSqlCmd *pCmd = &pSql->cmd; + + /* if the SSqlCmd owns a metermeta, release it first */ + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pCmd->pMeterMeta), false); + pCmd->pMeterMeta = (SMeterMeta *)taosGetDataFromCache(tscCacheHandle, meterId); + if (pCmd->pMeterMeta != NULL) { + tscTrace("%p the number of columns:%d, numOfTags:%d, addr:%p", pSql, pCmd->pMeterMeta->numOfColumns, + pCmd->pMeterMeta->numOfTags, pCmd->pMeterMeta); + return TSDB_CODE_SUCCESS; + } + + /* + * for async insert operation, release data block buffer before issue new object to get metermeta + * because in metermeta callback function, the tscParse function will generate the submit data blocks + */ + if (pSql->fp != NULL && pSql->pStream == NULL) { + tscfreeSqlCmdData(pCmd); + } + + return tscDoGetMeterMeta(pSql, meterId); +} + +int tscGetMeterMetaEx(SSqlObj *pSql, char *meterId, bool createIfNotExists) { + pSql->cmd.defaultVal[0] = createIfNotExists ? 1 : 0; + return tscGetMeterMeta(pSql, meterId); +} + +/* + * in handling the renew metermeta problem during insertion, + * If the meter is created on demand during insertion, the routine usually waits for a short + * period to re-issue the getMeterMeta msg, in which makes a greater change that vnode has + * successfully created the corresponding table. + */ +static void tscWaitingForCreateTable(SSqlCmd *pCmd) { + int32_t CREATE_METER_ON_DEMAND = 1; + if (pCmd->command == TSDB_SQL_INSERT && pCmd->defaultVal[0] == CREATE_METER_ON_DEMAND) { + taosMsleep(50); // todo: global config + } +} + +/** + * in renew metermeta, do not retrieve metadata in cache. + * @param pSql sql object + * @param meterId meter id + * @return status code + */ +int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) { + int code = 0; + + // handle metric meta renew process + SSqlCmd *pCmd = &pSql->cmd; + + // enforce the renew metermeta operation in async model + if (pSql->fp == NULL) pSql->fp = (void *)0x1; + + /* + * 1. nly update the metermeta in force model metricmeta is not updated + * 2. 
if get metermeta failed, still get the metermeta + */ + if (pCmd->pMeterMeta == NULL || !tscQueryOnMetric(pCmd)) { + if (pCmd->pMeterMeta) { + tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%d, addr:%p", + pSql, pCmd->pMeterMeta->numOfTags, pCmd->numOfCols, pCmd->pMeterMeta->uid, pCmd->pMeterMeta); + } + + tscWaitingForCreateTable(&pSql->cmd); + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMeterMeta), true); + + code = tscDoGetMeterMeta(pSql, meterId); + } else { + tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%d, addr:%p", + pSql, pCmd->pMeterMeta->numOfTags, pCmd->numOfCols, pCmd->pMeterMeta->uid, pCmd->pMeterMeta); + } + + if (code != TSDB_CODE_ACTION_IN_PROGRESS) { + if (pSql->fp == (void *)0x1) { + pSql->fp = NULL; + } + } + + return code; +} + +int tscGetMetricMeta(SSqlObj *pSql, char *meterId) { + int code = TSDB_CODE_NETWORK_UNAVAIL; + char tagstr[TSDB_MAX_TAGS_LEN + 1] = {0}; + + /* + * the vnode query condition is serialized into pCmd->payload, we need to rebuild key for metricmeta info in cache. + */ + tscGetMetricMetaCacheKey(&pSql->cmd, tagstr); + taosRemoveDataFromCache(tscCacheHandle, (void **)&(pSql->cmd.pMetricMeta), false); + + SMetricMeta *ppMeta = (SMetricMeta *)taosGetDataFromCache(tscCacheHandle, tagstr); + if (ppMeta != NULL) { + pSql->cmd.pMetricMeta = ppMeta; + return TSDB_CODE_SUCCESS; + } + + SSqlObj *pNew = malloc(sizeof(SSqlObj)); + memset(pNew, 0, sizeof(SSqlObj)); + pNew->pTscObj = pSql->pTscObj; + pNew->signature = pNew; + + pNew->cmd.command = TSDB_SQL_METRIC; + strcpy(pNew->cmd.name, meterId); + tscAllocPayloadWithSize(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); + + // the query condition on meter is serialized into payload + tscTagCondAssign(&pNew->cmd.tagCond, &pSql->cmd.tagCond); + + pNew->cmd.groupbyExpr = pSql->cmd.groupbyExpr; + + pNew->cmd.glimit = pSql->cmd.glimit; + pNew->cmd.order = pSql->cmd.order; + pNew->cmd.numOfReqTags = pSql->cmd.numOfReqTags; + + memcpy(pNew->cmd.tagColumnIndex, pSql->cmd.tagColumnIndex, sizeof(pSql->cmd.tagColumnIndex)); + + if (pSql->fp != NULL && pSql->pStream == NULL) { + tscfreeSqlCmdData(&pSql->cmd); + } + + tscTrace("%p allocate new pSqlObj:%p to get metricMeta", pSql, pNew); + if (pSql->fp == NULL) { + sem_init(&pNew->rspSem, 0, 0); + sem_init(&pNew->emptyRspSem, 0, 1); + + code = tscProcessSql(pNew); + pSql->cmd.pMetricMeta = taosGetDataFromCache(tscCacheHandle, tagstr); + tscFreeSqlObj(pNew); + } else { + pNew->fp = tscMeterMetaCallBack; + pNew->param = pSql; + code = tscProcessSql(pNew); + if (code == TSDB_CODE_SUCCESS) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + } + } + + return code; +} + +void tscInitMsgs() { + tscBuildMsg[TSDB_SQL_SELECT] = tscBuildQueryMsg; + tscBuildMsg[TSDB_SQL_INSERT] = tscBuildSubmitMsg; + tscBuildMsg[TSDB_SQL_FETCH] = tscBuildRetrieveMsg; + + tscBuildMsg[TSDB_SQL_CREATE_DB] = tscBuildCreateDbMsg; + tscBuildMsg[TSDB_SQL_CREATE_USER] = tscBuildCreateUserMsg; + + tscBuildMsg[TSDB_SQL_CREATE_ACCT] = tscBuildCreateAcctMsg; + tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAlterAcctMsg; + + tscBuildMsg[TSDB_SQL_CREATE_TABLE] = tscBuildCreateTableMsg; + tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserMsg; + tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropAcctMsg; + tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg; + tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg; + tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildAlterUserMsg; + tscBuildMsg[TSDB_SQL_CREATE_PNODE] = tscBuildCreateDnodeMsg; + tscBuildMsg[TSDB_SQL_DROP_PNODE] = 
tscBuildDropDnodeMsg; + tscBuildMsg[TSDB_SQL_CFG_PNODE] = tscBuildCfgDnodeMsg; + tscBuildMsg[TSDB_SQL_ALTER_TABLE] = tscBuildAlterTableMsg; + tscBuildMsg[TSDB_SQL_ALTER_DB] = tscAlterDbMsg; + + tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg; + tscBuildMsg[TSDB_SQL_USE_DB] = tscBuildUseDbMsg; + tscBuildMsg[TSDB_SQL_META] = tscBuildMeterMetaMsg; + tscBuildMsg[TSDB_SQL_METRIC] = tscBuildMetricMetaMsg; + + tscBuildMsg[TSDB_SQL_HB] = tscBuildHeartBeatMsg; + tscBuildMsg[TSDB_SQL_SHOW] = tscBuildShowMsg; + tscBuildMsg[TSDB_SQL_RETRIEVE] = tscBuildRetrieveFromMgmtMsg; + tscBuildMsg[TSDB_SQL_KILL_QUERY] = tscBuildKillQueryMsg; + tscBuildMsg[TSDB_SQL_KILL_STREAM] = tscBuildKillStreamMsg; + tscBuildMsg[TSDB_SQL_KILL_CONNECTION] = tscBuildKillConnectionMsg; + + tscProcessMsgRsp[TSDB_SQL_SELECT] = tscProcessQueryRsp; + tscProcessMsgRsp[TSDB_SQL_FETCH] = tscProcessRetrieveRspFromVnode; + + tscProcessMsgRsp[TSDB_SQL_DROP_DB] = tscProcessDropDbRsp; + tscProcessMsgRsp[TSDB_SQL_DROP_TABLE] = tscProcessDropTableRsp; + tscProcessMsgRsp[TSDB_SQL_CONNECT] = tscProcessConnectRsp; + tscProcessMsgRsp[TSDB_SQL_USE_DB] = tscProcessUseDbRsp; + tscProcessMsgRsp[TSDB_SQL_META] = tscProcessMeterMetaRsp; + tscProcessMsgRsp[TSDB_SQL_METRIC] = tscProcessMetricMetaRsp; + + tscProcessMsgRsp[TSDB_SQL_SHOW] = tscProcessShowRsp; + tscProcessMsgRsp[TSDB_SQL_RETRIEVE] = tscProcessRetrieveRspFromMgmt; + tscProcessMsgRsp[TSDB_SQL_DESCRIBE_TABLE] = tscProcessDescribeTableRsp; + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_TAGS] = tscProcessTagRetrieveRsp; + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_EMPTY_RESULT] = tscProcessEmptyResultRsp; + + tscProcessMsgRsp[TSDB_SQL_RETRIEVE_METRIC] = tscProcessRetrieveMetricRsp; + + tscProcessMsgRsp[TSDB_SQL_ALTER_TABLE] = tscProcessAlterTableMsgRsp; + tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp; + + tscKeepConn[TSDB_SQL_SHOW] = 1; + tscKeepConn[TSDB_SQL_RETRIEVE] = 1; + tscKeepConn[TSDB_SQL_SELECT] = 1; + tscKeepConn[TSDB_SQL_FETCH] = 1; + tscKeepConn[TSDB_SQL_HB] = 1; + + tscUpdateVnodeMsg[TSDB_SQL_SELECT] = tscUpdateVnodeInQueryMsg; + tscUpdateVnodeMsg[TSDB_SQL_INSERT] = tscUpdateVnodeInSubmitMsg; +} diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c new file mode 100644 index 000000000000..a8f9300cdc72 --- /dev/null +++ b/src/client/src/tscSql.c @@ -0,0 +1,738 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
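tscInitMsgs() above wires each TSDB_SQL_* command to its message builder (tscBuildMsg[]) and response handler (tscProcessMsgRsp[]) through function-pointer arrays indexed by command id. The following is only a minimal sketch of that dispatch pattern, using hypothetical names (initMsgs, buildTable, CMD_*) rather than the real tables:

```c
#include <stdio.h>

typedef int (*build_fn)(void *pSql);           /* hypothetical builder signature */

enum { CMD_SELECT = 0, CMD_INSERT, CMD_MAX };  /* stand-ins for TSDB_SQL_* values */

static int buildQuery(void *pSql)  { (void)pSql; printf("build query msg\n");  return 0; }
static int buildSubmit(void *pSql) { (void)pSql; printf("build submit msg\n"); return 0; }

static build_fn buildTable[CMD_MAX];

static void initMsgs(void) {                   /* mirrors the shape of tscInitMsgs() */
  buildTable[CMD_SELECT] = buildQuery;
  buildTable[CMD_INSERT] = buildSubmit;
}

int main(void) {
  initMsgs();
  int cmd = CMD_INSERT;                        /* dispatch purely by command id */
  return buildTable[cmd] ? buildTable[cmd](NULL) : -1;
}
```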
+ */ + +#include +#include +#include +#include +#include + +#include "tcache.h" +#include "tlog.h" +#include "trpc.h" +#include "tscProfile.h" +#include "tscSecondaryMerge.h" +#include "tscUtil.h" +#include "tsclient.h" +#include "tsocket.h" +#include "tsql.h" +#include "ttimer.h" +#include "tutil.h" + +TAOS *taos_connect_imp(char *ip, char *user, char *pass, char *db, int port, void (*fp)(void *, TAOS_RES *, int), + void *param, void **taos) { + STscObj *pObj; + + taos_init(); + + if (pTscMgmtConn == NULL || pVnodeConn == NULL) { + globalCode = TSDB_CODE_APP_ERROR; + return NULL; + } + + if (user == NULL) { + globalCode = TSDB_CODE_INVALID_ACCT; + return NULL; + } else { + size_t len = strlen(user); + if (len <= 0 || len > TSDB_USER_LEN) { + globalCode = TSDB_CODE_INVALID_ACCT; + return NULL; + } + } + + if (pass == NULL) { + globalCode = TSDB_CODE_INVALID_PASS; + return NULL; + } else { + size_t len = strlen(pass); + if (len <= 0 || len > TSDB_KEY_LEN) { + globalCode = TSDB_CODE_INVALID_PASS; + return NULL; + } + } + + if (ip && ip[0]) { + strcpy(tsServerIpStr, ip); + tsServerIp = inet_addr(ip); + } + + pObj = (STscObj *)malloc(sizeof(STscObj)); + memset(pObj, 0, sizeof(STscObj)); + pObj->signature = pObj; + + strncpy(pObj->user, user, TSDB_USER_LEN); + strncpy(pObj->pass, pass, TSDB_KEY_LEN); + pObj->mgmtPort = port ? port : tsMgmtShellPort; + + if (db) { + int32_t len = strlen(db); + /* db name is too long */ + if (len > TSDB_DB_NAME_LEN) { + free(pObj); + globalCode = TSDB_CODE_INVALID_DB; + return NULL; + } + + char tmp[TSDB_DB_NAME_LEN + 1] = {0}; + strcpy(tmp, db); + + strdequote(tmp); + strtolower(tmp, pObj->db); + } + + pthread_mutex_init(&pObj->mutex, NULL); + + SSqlObj *pSql = (SSqlObj *)malloc(sizeof(SSqlObj)); + memset(pSql, 0, sizeof(SSqlObj)); + pSql->pTscObj = pObj; + pSql->signature = pSql; + sem_init(&pSql->rspSem, 0, 0); + sem_init(&pSql->emptyRspSem, 0, 1); + pObj->pSql = pSql; + pSql->fp = fp; + pSql->param = param; + if (taos != NULL) { + *taos = pObj; + } + + pSql->cmd.command = TSDB_SQL_CONNECT; + tscAllocPayloadWithSize(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE); + + pSql->res.code = tscProcessSql(pSql); + if (fp != NULL) { + tscTrace("%p DB async connection is opening", pObj); + return pObj; + } + + if (pSql->res.code) { + taos_close(pObj); + return NULL; + } + + tscTrace("%p DB connection is opened", pObj); + return pObj; +} + +TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) { + if (ip != NULL && (strcmp("127.0.0.1", ip) == 0 || strcasecmp("localhost", ip) == 0)) { + ip = tsInternalIp; + } + + if (ip == NULL) ip = tsInternalIp; + tscTrace("try to create a connection to %s", ip); + + void *taos = taos_connect_imp(ip, user, pass, db, port, NULL, NULL, NULL); + if (taos != NULL) { + char *server_version = taos_get_server_info(taos); + if (server_version && strcmp(server_version, version) != 0) { + tscError("taos:%p, server version:%s not equal with client version:%s, close connection", + taos, server_version, version); + taos_close(taos); + globalCode = TSDB_CODE_INVALID_CLIENT_VERSION; + return NULL; + } + } + + return taos; +} + +TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, int port, void (*fp)(void *, TAOS_RES *, int), + void *param, void **taos) { + if (ip == NULL) { + ip = tsInternalIp; + } + return taos_connect_imp(ip, user, pass, db, port, fp, param, taos); +} + +void taos_close(TAOS *taos) { + STscObj *pObj = (STscObj *)taos; + + if (pObj == NULL) return; + if (pObj->signature != pObj) return; + + if (pObj->pHb != 
NULL) { + tscSetFreeHeatBeat(pObj); + } else { + tscCloseTscObj(pObj); + } +} + +int taos_query(TAOS *taos, char *sqlstr) { + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + return TSDB_CODE_DISCONNECTED; + } + + SSqlObj *pSql = pObj->pSql; + SSqlRes *pRes = &pSql->res; + + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + + tscTrace("%p SQL: %s pObj:%p", pSql, sqlstr, pObj); + + int32_t sqlLen = strlen(sqlstr); + if (sqlLen > TSDB_MAX_SQL_LEN) { + tscError("%p sql too long", pSql); + pRes->code = TSDB_CODE_INVALID_SQL; + return pRes->code; + } + + pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if (pSql->sqlstr == NULL) { + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("%p failed to malloc sql string buffer", pSql); + tscTrace("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + return pRes->code; + } + + strtolower(sqlstr, pSql->sqlstr); + pSql->sqlstr[sqlLen] = 0; + + pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); + + /* + * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query. + * If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscProcessSql() + * to free connection, which may cause segment fault, when the parse phrase is not even successfully executed. + */ + pRes->qhandle = 0; + pSql->thandle = NULL; + + if (pRes->code != TSDB_CODE_SUCCESS) return pRes->code; + + tscDoQuery(pSql); + + tscTrace("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + if (pRes->code != TSDB_CODE_SUCCESS) { + tscFreeSqlObjPartial(pSql); + } + + return pRes->code; +} + +TAOS_RES *taos_use_result(TAOS *taos) { + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + return NULL; + } + + return pObj->pSql; +} + +int taos_result_precision(TAOS_RES *res) { + SSqlObj *pSql = (SSqlObj *)res; + if (pSql == NULL || pSql->signature != pSql) return 0; + + return pSql->res.precision; +} + +int taos_num_rows(TAOS_RES *res) { return 0; } + +int taos_num_fields(TAOS_RES *res) { + SSqlObj *pSql = (SSqlObj *)res; + if (pSql == NULL || pSql->signature != pSql) return 0; + + return pSql->cmd.fieldsInfo.numOfOutputCols; +} + +int taos_field_count(TAOS *taos) { + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) return 0; + + return pObj->pSql->cmd.fieldsInfo.numOfOutputCols; +} + +int taos_affected_rows(TAOS *taos) { + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) return 0; + + return (pObj->pSql->res.numOfRows); +} + +TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { + SSqlObj *pSql = (SSqlObj *)res; + if (pSql == NULL || pSql->signature != pSql) return 0; + + return pSql->cmd.fieldsInfo.pFields; +} + +int taos_retrieve(TAOS_RES *res) { + if (res == NULL) return 0; + SSqlObj *pSql = (SSqlObj *)res; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + if (pSql == NULL || pSql->signature != pSql) return 0; + if (pRes->qhandle == 0) return 0; + + pRes->row = 0; + pRes->numOfRows = 0; + pCmd->type = 0; + if (pCmd->command < TSDB_SQL_LOCAL) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + tscProcessSql(pSql); + + return pRes->numOfRows; +} + +int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) { + SSqlObj *pSql = (SSqlObj *)res; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + STscObj *pObj = pSql->pTscObj; + + if (pRes->qhandle == 0 || pObj->pSql != pSql) { + *rows = NULL; + return 0; + } + + // Retrieve new block + pRes->row = 0; + pRes->numOfRows = 0; + pCmd->type = 0; + if (pCmd->command < TSDB_SQL_LOCAL) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + + tscProcessSql(pSql); + if (pRes->numOfRows == 0) { + *rows = NULL; + return 0; + } + + for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + + pRes->bytes[i] * (1 - pCmd->order.order) * (pRes->numOfRows - 1); + } + + *rows = pRes->tsrow; + + return (pCmd->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows; +} + +TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) { + SSqlObj *pSql = (SSqlObj *)res; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + STscObj *pObj = pSql->pTscObj; + int wccount = 0; + + if (pRes->qhandle == 0) return NULL; + + if (pRes->row >= pRes->numOfRows) { + if (pObj->pSql != pSql) return NULL; + + pRes->row = 0; + pRes->numOfRows = 0; + pCmd->type = 0; + if (pCmd->command < TSDB_SQL_LOCAL) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + + tscProcessSql(pSql); + if (pRes->numOfRows == 0) { + return NULL; + } + + /* localreducer has handle this situation */ + if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) { + pRes->numOfTotal += pRes->numOfRows; + } + } + + for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row; + // primary key column cannot be null in interval query, no need to check + if (i == 0 && pCmd->nAggTimeInterval > 0) { + continue; + } + + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + if (isNull(pRes->tsrow[i], pField->type)) { + pRes->tsrow[i] = NULL; + } else if (pField->type == TSDB_DATA_TYPE_NCHAR) { + /* + * convert unicode to native code in a temporary buffer extra one byte for terminated symbol + */ + if (pRes->buffer[wccount] == NULL) { + pRes->buffer[wccount] = (char *)calloc(1, pField->bytes + 1); + } else { + pRes->buffer[wccount] = realloc(pRes->buffer[wccount], pField->bytes + 1); + } + + /* string terminated */ + memset(pRes->buffer[wccount], 0, pField->bytes); + + if (taosUcs4ToMbs(pRes->tsrow[i], pField->bytes, pRes->buffer[wccount])) { + pRes->tsrow[i] = pRes->buffer[wccount]; + } else { + tscError("%p charset:%s to %s. 
val:%ls convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, pRes->tsrow); + pRes->tsrow[i] = NULL; + } + wccount++; + } + } + + assert(wccount <= pRes->numOfnchar); + pRes->row++; + + return pRes->tsrow; +} + +TAOS_ROW taos_fetch_row(TAOS_RES *res) { + SSqlObj *pSql = (SSqlObj *)res; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + if (pSql == NULL || pSql->signature != pSql) { + globalCode = TSDB_CODE_DISCONNECTED; + return NULL; + } + + // projection query on metric, pipeline retrieve data from vnode list, instead of two-stage merge + TAOS_ROW rows = taos_fetch_row_impl(res); + while (rows == NULL && tscProjectionQueryOnMetric(pSql)) { + /* reach the maximum number of output rows, abort */ + if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { + return NULL; + } + + /* + * update the limit and offset value according to current retrieval results + * Note: if pRes->offset > 0, pRes->numOfRows = 0, pRes->numOfTotal = 0; + */ + pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; + pCmd->limit.offset = pRes->offset; + + assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); + + if ((++pCmd->vnodeIdx) <= pCmd->pMetricMeta->numOfVnodes) { + pCmd->command = TSDB_SQL_SELECT; + assert(pSql->fp == NULL); + tscProcessSql(pSql); + rows = taos_fetch_row_impl(res); + } + + // check!!! + if (rows != NULL || pCmd->vnodeIdx >= pCmd->pMetricMeta->numOfVnodes) { + break; + } + } + + return rows; +} + +int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { + SSqlObj *pSql = (SSqlObj *)res; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + int nRows = 0; + + if (pSql == NULL || pSql->signature != pSql) { + globalCode = TSDB_CODE_DISCONNECTED; + *rows = NULL; + return 0; + } + + // projection query on metric, pipeline retrieve data from vnode list, instead + // of two-stage mergevnodeProcessMsgFromShell free qhandle + nRows = taos_fetch_block_impl(res, rows); + if (*rows == NULL && tscProjectionQueryOnMetric(pSql)) { + /* reach the maximum number of output rows, abort */ + if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { + return 0; + } + + /* update the limit value according to current retrieval results */ + pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal; + + if ((++pSql->cmd.vnodeIdx) <= pSql->cmd.pMetricMeta->numOfVnodes) { + pSql->cmd.command = TSDB_SQL_SELECT; + assert(pSql->fp == NULL); + tscProcessSql(pSql); + nRows = taos_fetch_block_impl(res, rows); + } + } + + return nRows; +} + +int taos_select_db(TAOS *taos, char *db) { + char sql[64]; + + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + return TSDB_CODE_DISCONNECTED; + } + + sprintf(sql, "use %s", db); + + return taos_query(taos, sql); +} + +void taos_free_result(TAOS_RES *res) { + if (res == NULL) return; + + SSqlObj *pSql = (SSqlObj *)res; + SSqlRes *pRes = &pSql->res; + SSqlCmd *pCmd = &pSql->cmd; + + tscTrace("%p start to free result", pSql); + + if (pSql->signature != pSql) return; + if (pRes == NULL || pRes->qhandle == 0) { + /* Query rsp is not received from vnode, so the qhandle is NULL */ + tscTrace("%p qhandle is null, abort free, fp:%p", pSql, pSql->fp); + if (pSql->fp != NULL) { + pSql->thandle = NULL; + tscFreeSqlObj(pSql); + tscTrace("%p Async SqlObj is freed by app", pSql); + } else { + tscFreeSqlObjPartial(pSql); + } + return; + } + + pCmd->type = 1; // set freeFlag to 1 in retrieve message if there are + // un-retrieved 
results + + /* + * case 1. Partial data have been retrieved from vnodes, but not all data has been retrieved yet. We need to recycle + * the connection by noticing the vnode return 0 results. + * case 2. When the query response is received from vnodes and the numOfRows is set to 0, the user calls + * taos_free_result before the taos_fetch_row is called in non-stream computing, we need to recycle the + * connection. + * case 3. If the query process is cancelled by user in stable query, tscProcessSql should not be called for each + * subquery. Because the failure of execution tsProcessSql may trigger the callback function + * be executed, and the retry efforts may result in double free the + * resources, e.g.,SRetrieveSupport + */ + if (pRes->code != TSDB_CODE_QUERY_CANCELLED && + ((pRes->numOfRows > 0 && pCmd->command < TSDB_SQL_LOCAL) || + (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows == 0 && pCmd->command == TSDB_SQL_SELECT && + pSql->pStream == NULL && pCmd->pMeterMeta != NULL))) { + pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + tscProcessSql(pSql); + + if (pSql->fp) { + pSql->freed = 1; + } else { + pSql->thandle = NULL; + + /* + * remove allocated resources and release metermeta/metricmeta references in cache + * since current query is completed + */ + tscFreeSqlObjPartial(pSql); + tscTrace("%p sql result is freed by app", pSql); + } + } else { + if (pSql->fp) { + assert(pRes->numOfRows == 0 || (pCmd->command > TSDB_SQL_LOCAL)); + pSql->thandle = NULL; + tscFreeSqlObj(pSql); + tscTrace("%p Async SqlObj is freed by app", pSql); + } else { + pSql->thandle = NULL; + + /* + * remove allocated resources and release metermeta/metricmeta references in cache + * since current query is completed + */ + tscFreeSqlObjPartial(pSql); + tscTrace("%p sql result is freed", pSql); + } + } +} + +int taos_errno(TAOS *taos) { + STscObj *pObj = (STscObj *)taos; + int code; + + if (pObj == NULL || pObj->signature != pObj) return globalCode; + + if (pObj->pSql->res.code == -1) + code = TSDB_CODE_OTHERS; + else + code = pObj->pSql->res.code; + + return code; +} + +char *taos_errstr(TAOS *taos) { + STscObj * pObj = (STscObj *)taos; + unsigned char code; + char temp[256] = {0}; + + if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode]; + + if (pObj->pSql->res.code == -1) + code = TSDB_CODE_OTHERS; + else + code = pObj->pSql->res.code; + + if (code == TSDB_CODE_INVALID_SQL) { + sprintf(temp, "invalid SQL: %s", pObj->pSql->cmd.payload); + strcpy(pObj->pSql->cmd.payload, temp); + return pObj->pSql->cmd.payload; + } else { + return tsError[code]; + } +} + +void taos_config(int debug, char *log_path) { + uDebugFlag = debug; + strcpy(logDir, log_path); +} + +char *taos_get_server_info(TAOS *taos) { + STscObj *pObj = (STscObj *)taos; + + if (pObj == NULL) return NULL; + + return pObj->sversion; +} + +char *taos_get_client_info() { return version; } + +void taos_stop_query(TAOS_RES *res) { + if (res == NULL) return; + + SSqlObj *pSql = (SSqlObj *)res; + if (pSql->signature != pSql) return; + tscTrace("%p start to cancel query", res); + + pSql->res.code = TSDB_CODE_QUERY_CANCELLED; + + if (tscIsTwoStageMergeMetricQuery(pSql)) { + tscKillMetricQuery(pSql); + return; + } + + if (pSql->cmd.command >= TSDB_SQL_LOCAL) { + return; + } + + if (pSql->thandle == NULL) { + tscTrace("%p no connection, abort cancel", res); + return; + } + + taosStopRpcConn(pSql->thandle); + tscTrace("%p query is cancelled", res); +} + +int taos_print_row(char *str, TAOS_ROW row, 
TAOS_FIELD *fields, int num_fields) { + int len = 0; + for (int i = 0; i < num_fields; ++i) { + if (row[i] == NULL) { + len += sprintf(str + len, "%s ", TSDB_DATA_NULL_STR); + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_TINYINT: + len += sprintf(str + len, "%d ", *((char *)row[i])); + break; + + case TSDB_DATA_TYPE_SMALLINT: + len += sprintf(str + len, "%d ", *((short *)row[i])); + break; + + case TSDB_DATA_TYPE_INT: + len += sprintf(str + len, "%d ", *((int *)row[i])); + break; + + case TSDB_DATA_TYPE_BIGINT: + len += sprintf(str + len, "%ld ", *((int64_t *)row[i])); + break; + + case TSDB_DATA_TYPE_FLOAT: + len += sprintf(str + len, "%f ", *((float *)row[i])); + break; + + case TSDB_DATA_TYPE_DOUBLE: + len += sprintf(str + len, "%lf ", *((double *)row[i])); + break; + + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + /* limit the max length of string to no greater than the maximum length, + * in case of not null-terminated string */ + len += snprintf(str + len, (size_t)fields[i].bytes + 1, "%s ", (char *)row[i]); + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + len += sprintf(str + len, "%ld ", *((int64_t *)row[i])); + break; + + case TSDB_DATA_TYPE_BOOL: + len += sprintf(str + len, "%d ", *((int8_t *)row[i])); + default: + break; + } + } + + return len; +} + +int taos_validate_sql(TAOS *taos, char *sql) { + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + return TSDB_CODE_DISCONNECTED; + } + + SSqlObj *pSql = pObj->pSql; + SSqlRes *pRes = &pSql->res; + + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + + tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj); + + int32_t sqlLen = strlen(sql); + if (sqlLen > TSDB_MAX_SQL_LEN) { + tscError("%p sql too long", pSql); + pRes->code = TSDB_CODE_INVALID_SQL; + return pRes->code; + } + + pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if (pSql->sqlstr == NULL) { + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("%p failed to malloc sql string buffer", pSql); + tscTrace("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + return pRes->code; + } + + strtolower(sql, pSql->sqlstr); + pSql->sqlstr[sqlLen] = 0; + + pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false); + int code = pRes->code; + + tscTrace("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); + taos_free_result(pSql); + + return code; +} diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c new file mode 100644 index 000000000000..1d45f9ec9802 --- /dev/null +++ b/src/client/src/tscStream.c @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
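tscSql.c above defines the synchronous client entry points (taos_connect, taos_query, taos_use_result, taos_fetch_row, taos_print_row, taos_free_result, taos_close). A rough usage sketch follows; the host, credentials, database and table names are placeholders, error handling is abbreviated, and the public header is assumed to be taos.h (as included by tscSub.c below):

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  /* placeholder connection parameters */
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "demo_db", 0);
  if (conn == NULL) {
    fprintf(stderr, "connect failed\n");
    return 1;
  }

  if (taos_query(conn, "select * from demo_table") != 0) {   /* hypothetical table */
    fprintf(stderr, "query failed: %s\n", taos_errstr(conn));
    taos_close(conn);
    return 1;
  }

  TAOS_RES   *res    = taos_use_result(conn);
  int         nCols  = taos_num_fields(res);
  TAOS_FIELD *fields = taos_fetch_fields(res);

  TAOS_ROW row;
  char     line[1024] = {0};                   /* generously sized for this sketch */
  while ((row = taos_fetch_row(res)) != NULL) {
    taos_print_row(line, row, fields, nCols);  /* format one row into line */
    printf("%s\n", line);
  }

  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```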
+ */ + +#include "tlog.h" +#include "tsql.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +#include "tsclient.h" +#include "tscUtil.h" + +#include "tscProfile.h" + +static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows); +static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows); +static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql); +static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer); + +static bool isProjectStream(SSqlCmd *pCmd) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr *pExpr = tscSqlExprGet(pCmd, i); + if (pExpr->sqlFuncId != TSDB_FUNC_PRJ) { + return false; + } + } + + return true; +} + +static int64_t tscGetRetryDelayTime(int64_t slidingTime) { + float RETRY_RANGE_FACTOR = 0.3; + + int64_t retryDelta = (int64_t)tsStreamCompRetryDelay * RETRY_RANGE_FACTOR; + retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L; + + if (slidingTime < retryDelta) { + return slidingTime; + } else { + return retryDelta; + } +} + +static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { + SSqlStream *pStream = (SSqlStream *)pMsg->ahandle; + SSqlObj * pSql = pStream->pSql; + + pSql->fp = tscProcessStreamQueryCallback; + pSql->param = pStream; + + int code = tscGetMeterMeta(pSql, pSql->cmd.name); + pSql->res.code = code; + + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + + if (code == 0 && UTIL_METER_IS_METRIC(&pSql->cmd)) { + code = tscGetMetricMeta(pSql, pSql->cmd.name); + pSql->res.code = code; + + if (code == TSDB_CODE_ACTION_IN_PROGRESS) return; + } + + tscTansformSQLFunctionForMetricQuery(&pSql->cmd); + + // failed to get meter/metric meta, retry in 10sec. + if (code != TSDB_CODE_SUCCESS) { + int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime); + tscError("%p stream:%p,get metermeta failed, retry in %ldms.", pStream->pSql, pStream, retryDelayTime); + + tscSetRetryTimer(pStream, pSql, retryDelayTime); + return; + } + + tscTrace("%p stream:%p start stream query on:%s", pSql, pStream, pSql->cmd.name); + tscProcessSql(pStream->pSql); + + tscIncStreamExecutionCount(pStream); +} + +static void tscProcessStreamTimer(void *handle, void *tmrId) { + SSqlStream *pStream = (SSqlStream *)handle; + if (pStream == NULL) return; + if (pStream->pTimer != tmrId) return; + pStream->pTimer = NULL; + + pStream->numOfRes = 0; // reset the numOfRes. + SSqlObj *pSql = pStream->pSql; + + if (isProjectStream(&pSql->cmd)) { + /* + * pSql->cmd.etime, which is the start time, does not change in case of + * repeat first execution, once the first execution failed. 
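+   * For this launch the query window is [pStream->stime, now] (capped at pStream->etime); the
+   * non-projection branch below uses [pStream->stime - interval, pStream->stime - 1] instead.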
+ */ + pSql->cmd.stime = pStream->stime; // start time + + pSql->cmd.etime = taosGetTimestampMs(); // end time + if (pSql->cmd.etime > pStream->etime) { + pSql->cmd.etime = pStream->etime; + } + } else { + pSql->cmd.stime = pStream->stime - pStream->interval; + pSql->cmd.etime = pStream->stime - 1; + } + + // launch stream computing in a new thread + SSchedMsg schedMsg; + schedMsg.fp = tscProcessStreamLaunchQuery; + schedMsg.ahandle = pStream; + schedMsg.thandle = (void *)1; + schedMsg.msg = NULL; + taosScheduleTask(tscQhandle, &schedMsg); +} + +static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) { + SSqlStream *pStream = (SSqlStream *)param; + if (tres == NULL || numOfRows < 0) { + int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime); + tscError("%p stream:%p, query data failed, code:%d, retry in %ldms", pStream->pSql, pStream, numOfRows, retryDelay); + + tscClearSqlMetaInfoForce(&(pStream->pSql->cmd)); + tscSetRetryTimer(pStream, pStream->pSql, retryDelay); + return; + } + + taos_fetch_rows_a(tres, tscProcessStreamRetrieveResult, param); +} + +static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql, int32_t numOfRows) { + SSqlRes *pRes = &pSql->res; + int64_t timestamp = *(int64_t *)pRes->data; + + if (timestamp != pStream->stime) { + // reset the timestamp of each agg point by using start time of each interval + *((int64_t *)pRes->data) = pStream->stime - pStream->interval; + tscWarn("%p stream:%p, timestamp of points is:%lld, reset to %lld", pSql, pStream, timestamp, + pStream->stime - pStream->interval); + } +} + +static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows) { + SSqlStream *pStream = (SSqlStream *)param; + SSqlObj * pSql = (SSqlObj *)res; + + if (pSql == NULL || numOfRows < 0) { + int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime); + tscError("%p stream:%p, retrieve data failed, code:%d, retry in %ldms", pSql, pStream, numOfRows, retryDelayTime); + tscClearSqlMetaInfoForce(&(pStream->pSql->cmd)); + + tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime); + return; + } + + if (numOfRows > 0) { // save + // when reaching here the first execution of stream computing is successful. + pStream->numOfRes += numOfRows; + TAOS_ROW row = NULL; //; + while ((row = taos_fetch_row(res)) != NULL) { + // char result[512] = {0}; + // taos_print_row(result, row, pSql->cmd.fieldsInfo.pFields, pSql->cmd.fieldsInfo.numOfOutputCols); + // tscPrint("%p stream:%p query result: %s", pSql, pStream, result); + tscTrace("%p stream:%p fetch result", pSql, pStream); + if (isProjectStream(&pSql->cmd)) { + pStream->stime = *(TSKEY *)row[0]; + } else { + tscSetTimestampForRes(pStream, pSql, numOfRows); + } + + // user callback function + (*pStream->fp)(pStream->param, res, row); + } + + // actually only one row is returned. 
this following is not necessary + taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream); + } else { // numOfRows == 0, all data has been retrieved + pStream->useconds += pSql->res.useconds; + + if (pStream->numOfRes == 0) { + if (pSql->cmd.interpoType == TSDB_INTERPO_SET_VALUE || pSql->cmd.interpoType == TSDB_INTERPO_NULL) { + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + + /* failed to retrieve any result in this retrieve */ + pSql->res.numOfRows = 1; + void *row[TSDB_MAX_COLUMNS] = {0}; + char tmpRes[TSDB_MAX_BYTES_PER_ROW] = {0}; + + void *oldPtr = pSql->res.data; + pSql->res.data = tmpRes; + + for (int32_t i = 1; i < pSql->cmd.fieldsInfo.numOfOutputCols; ++i) { + int16_t offset = tscFieldInfoGetOffset(pCmd, i); + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i); + + assignVal(pSql->res.data + offset, (char *)(&pCmd->defaultVal[i]), pField->bytes, pField->type); + row[i] = pSql->res.data + offset; + } + + tscSetTimestampForRes(pStream, pSql, numOfRows); + row[0] = pRes->data; + + // char result[512] = {0}; + // taos_print_row(result, row, pSql->cmd.fieldsInfo.pFields, pSql->cmd.fieldsInfo.numOfOutputCols); + // tscPrint("%p stream:%p query result: %s", pSql, pStream, result); + tscTrace("%p stream:%p fetch result", pSql, pStream); + + // user callback function + (*pStream->fp)(pStream->param, res, row); + + pRes->numOfRows = 0; + pRes->data = oldPtr; + } else if (isProjectStream(&pSql->cmd)) { + /* no resuls in the query range, retry */ + // todo set retry dynamic time + int32_t retry = tsProjectExecInterval; + tscError("%p stream:%p, retrieve no data, code:%d, retry in %lldms", pSql, pStream, numOfRows, retry); + + tscClearSqlMetaInfoForce(&(pStream->pSql->cmd)); + tscSetRetryTimer(pStream, pStream->pSql, retry); + return; + } + } else { + if (isProjectStream(&pSql->cmd)) { + pStream->stime += 1; + } + } + + tscTrace("%p stream:%p, query on:%s, fetch result completed, fetched rows:%d.", pSql, pStream, pSql->cmd.name, + pStream->numOfRes); + + /* release the metric/meter meta information reference, so data in cache can be updated */ + tscClearSqlMetaInfo(&(pSql->cmd)); + tscSetNextLaunchTimer(pStream, pSql); + } +} + +static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) { + if (isProjectStream(&pSql->cmd)) { + int64_t now = taosGetTimestampMs(); + int64_t etime = now > pStream->etime ? pStream->etime : now; + + if (pStream->etime < now && now - pStream->etime > tsMaxRetentWindow) { + /* + * current time window will be closed, since it too early to exceed the maxRetentWindow value + */ + tscTrace("%p stream:%p, etime:%lld is too old, exceeds the max retention time window:%lld, stop the stream", + pStream->pSql, pStream, pStream->stime, pStream->etime); + // TODO : How to terminate stream here + taos_close_stream(pStream); + if (pStream->callback) { + // Callback function from upper level + pStream->callback(pStream->param); + } + return; + } + + tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", + pStream->pSql, pStream, now + timer, timer, pStream->stime, etime); + } else { + tscTrace("%p stream:%p, next query start at %lld, in %lldms. 
query range %lld-%lld", + pStream->pSql, pStream, pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1); + } + + pSql->cmd.command = TSDB_SQL_SELECT; + + // start timer for next computing + taosTmrReset(tscProcessStreamTimer, timer, pStream, tscTmr, &pStream->pTimer); +} + +static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { + int64_t timer = 0; + + if (isProjectStream(&pSql->cmd)) { + /* + * for project query, no mater fetch data successfully or not, next launch will issue + * more than the sliding time window + */ + timer = pStream->slidingTime; + if (pStream->stime > pStream->etime) { + tscTrace("%p stream:%p, stime:%lld is larger than end time: %lld, stop the stream", + pStream->pSql, pStream, pStream->stime, pStream->etime); + // TODO : How to terminate stream here + taos_close_stream(pStream); + if (pStream->callback) { + // Callback function from upper level + pStream->callback(pStream->param); + } + return; + } + } else { + pStream->stime += pStream->slidingTime; + if ((pStream->stime - pStream->interval) >= pStream->etime) { + tscTrace("%p stream:%p, stime:%ld is larger than end time: %ld, stop the stream", + pStream->pSql, pStream, pStream->stime, pStream->etime); + // TODO : How to terminate stream here + taos_close_stream(pStream); + if (pStream->callback) { + // Callback function from upper level + pStream->callback(pStream->param); + } + return; + } + + timer = pStream->stime - taosGetTimestampSec() * 1000L; + if (timer < 0) { + timer = 0; + } + } + + int64_t delayDelta = (int64_t)(pStream->slidingTime * 0.1); + delayDelta = (rand() % delayDelta); + if (delayDelta > tsMaxStreamComputDelay) { + delayDelta = tsMaxStreamComputDelay; + } + + timer += delayDelta; // a random number + tscSetRetryTimer(pStream, pSql, timer); +} + +TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), + int64_t stime, void *param, void (*callback)(void *)) { + STscObj *pObj = (STscObj *)taos; + if (pObj == NULL || pObj->signature != pObj) return NULL; + + SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); + if (pSql == NULL) { + if (tscEmbedded) { + tscError("%p server out of memory", pSql); + pSql->res.code = TSDB_CODE_SERV_OUT_OF_MEMORY; + } else { + tscError("%p client out of memory", pSql); + pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; + } + + return NULL; + } + + pSql->signature = pSql; + pSql->pTscObj = pObj; + SSqlCmd *pCmd = &pSql->cmd; + SSqlRes *pRes = &pSql->res; + tscAllocPayloadWithSize(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE); + + int32_t len = strlen(sqlstr); + pSql->sqlstr = malloc(strlen(sqlstr) + 1); + if (pSql->sqlstr == NULL) { + if (tscEmbedded) { + tscError("%p server out of memory", pSql); + pSql->res.code = TSDB_CODE_SERV_OUT_OF_MEMORY; + } else { + tscError("%p client out of memory", pSql); + pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; + } + + return NULL; + } + strcpy(pSql->sqlstr, sqlstr); + pSql->sqlstr[len] = 0; + + sem_init(&pSql->rspSem, 0, 0); + sem_init(&pSql->emptyRspSem, 0, 1); + + SSqlInfo SQLInfo = {0}; + tSQLParse(&SQLInfo, pSql->sqlstr); + pRes->code = tscToSQLCmd(pSql, &SQLInfo); + SQLInfoDestroy(&SQLInfo); + + if (pRes->code != 0) { + tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code); + tscFreeSqlObj(pSql); + return NULL; + } + + SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream)); + if (pStream == NULL) return NULL; + + pStream->fp = fp; + pStream->callback = callback; + pStream->param = param; + 
pStream->pSql = pSql; + pStream->ctime = taosGetTimestampMs(); + pStream->etime = (pCmd->etime) ? pCmd->etime : INT64_MAX; + + pSql->pStream = pStream; + tscAddIntoStreamList(pStream); + + if (pCmd->nAggTimeInterval < tsMinIntervalTime) { + tscWarn("%p stream:%p, original sample interval:%ldms too small, reset to:%ldms", pSql, pStream, + pCmd->nAggTimeInterval, tsMinIntervalTime); + + pCmd->nAggTimeInterval = tsMinIntervalTime; + } + pStream->interval = pCmd->nAggTimeInterval; // it shall be derived from sql string + + if (pCmd->nSlidingTime == 0) { + pCmd->nSlidingTime = pCmd->nAggTimeInterval; + } + + if (pCmd->nSlidingTime < tsMinSlidingTime) { + tscWarn("%p stream:%p, original sliding value:%lldms too small, reset to:%lldms", + pSql, pStream, pCmd->nSlidingTime, tsMinSlidingTime); + + pCmd->nSlidingTime = tsMinSlidingTime; + } + + if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) { + tscWarn("%p stream:%p, sliding value:%lldms can not be larger than interval range, reset to:%lldms", + pSql, pStream, pCmd->nSlidingTime, pCmd->nAggTimeInterval); + + pCmd->nSlidingTime = pCmd->nAggTimeInterval; + } + + pStream->slidingTime = pCmd->nSlidingTime; + + if (isProjectStream(pCmd)) { + // no data in table, flush all data till now to destination meter, 10sec delay + pStream->interval = tsProjectExecInterval; + pStream->slidingTime = tsProjectExecInterval; + + if (stime != 0) { // first projection start from the latest event timestamp + assert(stime >= pCmd->etime); + stime += 1; // exclude the last records from table + } else { + stime = pCmd->etime; + } + } else { + // timewindow based aggregation stream + if (stime == 0) { // no data in meter till now + stime = ((int64_t)taosGetTimestampSec() * 1000L / pStream->interval) * pStream->interval; + tscWarn("%p stream:%p, last timestamp:0, reset to:%lld", pSql, pStream, stime, stime); + } else { + int64_t newStime = (stime / pStream->interval) * pStream->interval; + if (newStime != stime) { + tscWarn("%p stream:%p, last timestamp:%lld, reset to:%lld", pSql, pStream, stime, newStime); + stime = newStime; + } + } + } + + pStream->stime = stime; + + int64_t timer = pStream->stime - taosGetTimestampSec() * 1000L; + if (timer < 0) timer = 0; + + int64_t delayDelta = (int64_t)(pStream->interval * 0.1); + if (delayDelta > tsMaxStreamComputDelay) { + delayDelta = tsMaxStreamComputDelay; + } + + srand(time(NULL)); + timer += (rand() % delayDelta); // a random number + + if (timer < tsStreamCompStartDelay || timer > tsMaxStreamComputDelay) { + timer = (timer % tsStreamCompStartDelay) + tsStreamCompStartDelay; + } + + taosTmrReset(tscProcessStreamTimer, timer, pStream, tscTmr, &pStream->pTimer); + tscTrace("%p stream:%p is opened, query on:%s, interval:%ld, sliding:%ld, first launched in:%ld ms, sql:%s", + pSql, pStream, pSql->cmd.name, pStream->interval, pStream->slidingTime, timer, sqlstr); + + return pStream; +} + +void taos_close_stream(TAOS_STREAM *handle) { + SSqlStream *pStream = (SSqlStream *)handle; + + SSqlObj *pSql = (SSqlObj *)__sync_val_compare_and_swap_64(&pStream->pSql, pStream->pSql, 0); + if (pSql == NULL) { + return; + } + + /* + * stream may be closed twice, 1. drop dst table, 2. 
kill stream + * Here, we need a check before release memory + */ + if (pSql->signature == pSql) { + tscRemoveFromStreamList(pStream, pSql); + + taosTmrStopA(&(pStream->pTimer)); + tscFreeSqlObj(pSql); + pStream->pSql = NULL; + + tscTrace("%p stream:%p is closed", pSql, pStream); + tfree(pStream); + } +} diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c new file mode 100644 index 000000000000..e940fae535fe --- /dev/null +++ b/src/client/src/tscSub.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include + +#include "shash.h" +#include "taos.h" +#include "tlog.h" +#include "trpc.h" +#include "tsclient.h" +#include "tsocket.h" +#include "ttime.h" +#include "tutil.h" + +typedef struct { + void * signature; + char name[TSDB_METER_ID_LEN]; + int mseconds; + TSKEY lastKey; + uint64_t stime; + TAOS_FIELD fields[TSDB_MAX_COLUMNS]; + int numOfFields; + TAOS * taos; + TAOS_RES * result; +} SSub; + +TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *name, int64_t time, int mseconds) { + SSub *pSub; + + pSub = (SSub *)malloc(sizeof(SSub)); + if (pSub == NULL) return NULL; + memset(pSub, 0, sizeof(SSub)); + + pSub->signature = pSub; + strcpy(pSub->name, name); + pSub->mseconds = mseconds; + pSub->lastKey = time; + if (pSub->lastKey == 0) { + pSub->lastKey = taosGetTimestampMs(); + } + + taos_init(); + pSub->taos = taos_connect(host, user, pass, NULL, 0); + if (pSub->taos == NULL) { + tfree(pSub); + } else { + char qstr[128]; + sprintf(qstr, "use %s", db); + int res = taos_query(pSub->taos, qstr); + if (res != 0) { + tscError("failed to open DB:%s", db); + taos_close(pSub->taos); + tfree(pSub); + } else { + sprintf(qstr, "select * from %s where _c0 > now+1000d", pSub->name); + if (taos_query(pSub->taos, qstr)) { + tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); + taos_close(pSub->taos); + tfree(pSub); + return NULL; + } + pSub->result = taos_use_result(pSub->taos); + pSub->numOfFields = taos_num_fields(pSub->result); + memcpy(pSub->fields, taos_fetch_fields(pSub->result), sizeof(TAOS_FIELD) * pSub->numOfFields); + } + } + + return pSub; +} + +TAOS_ROW taos_consume(TAOS_SUB *tsub) { + SSub * pSub = (SSub *)tsub; + TAOS_ROW row; + char qstr[256]; + + if (pSub == NULL) return NULL; + if (pSub->signature != pSub) return NULL; + + while (1) { + if (pSub->result != NULL) { + row = taos_fetch_row(pSub->result); + if (row != NULL) { + pSub->lastKey = *((uint64_t *)row[0]); + return row; + } + + taos_free_result(pSub->result); + pSub->result = NULL; + uint64_t etime = taosGetTimestampMs(); + time_t mseconds = pSub->mseconds - etime + pSub->stime; + if (mseconds < 0) mseconds = 0; + taosMsleep((int)mseconds); + } + + pSub->stime = taosGetTimestampMs(); + + sprintf(qstr, "select * from %s where _c0 > %ld order by _c0 asc", pSub->name, pSub->lastKey); + if (taos_query(pSub->taos, qstr)) { + tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); + return NULL; + } + + pSub->result = 
taos_use_result(pSub->taos); + + if (pSub->result == NULL) { + tscTrace("failed to get result, reason:%s", taos_errstr(pSub->taos)); + return NULL; + } + } + + return NULL; +} + +void taos_unsubscribe(TAOS_SUB *tsub) { + SSub *pSub = (SSub *)tsub; + + if (pSub == NULL) return; + if (pSub->signature != pSub) return; + + taos_close(pSub->taos); + free(pSub); +} + +int taos_subfields_count(TAOS_SUB *tsub) { + SSub *pSub = (SSub *)tsub; + + return pSub->numOfFields; +} + +TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub) { + SSub *pSub = (SSub *)tsub; + + return pSub->fields; +} diff --git a/src/client/src/tscSyntaxtreefunction.c b/src/client/src/tscSyntaxtreefunction.c new file mode 100644 index 000000000000..f231d139352b --- /dev/null +++ b/src/client/src/tscSyntaxtreefunction.c @@ -0,0 +1,1307 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +#include "tscSyntaxtreefunction.h" +#include "tsql.h" +#include "ttypes.h" +#include "tutil.h" + +#define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ + { \ + int32_t i = ((_ord) == TSQL_SO_ASC) ? 0 : MAX(len1, len2) - 1; \ + int32_t step = ((_ord) == TSQL_SO_ASC) ? 1 : -1; \ + \ + if ((len1) == (len2)) { \ + for (; i < (len2) && i >= 0; i += step) { \ + if (isNull((char *)&(left)[i], _left_type) || isNull((char *)&(right)[i], _right_type)) { \ + setNull((char *)&(out)[i], _res_type, tDataTypeDesc[_res_type].nSize); \ + continue; \ + } \ + *(out) = (double)(left)[i] op(right)[i]; \ + (out) += step; \ + } \ + } else if ((len1) == 1) { \ + for (; i >= 0 && i < (len2); i += step) { \ + if (isNull((char *)&(left)[i], _left_type) || isNull((char *)&(right)[i], _right_type)) { \ + setNull((char *)&(out)[i], _res_type, tDataTypeDesc[_res_type].nSize); \ + continue; \ + } \ + *(out) = (double)pLeft[0] op(pRight)[i]; \ + (out) += step; \ + } \ + } else if ((len2) == 1) { \ + for (; i >= 0 && i < (len1); i += step) { \ + if (isNull((char *)&(left)[i], _left_type) || isNull((char *)&(right)[i], _right_type)) { \ + setNull((char *)&(out)[i], _res_type, tDataTypeDesc[_res_type].nSize); \ + continue; \ + } \ + *(out) = (double)(pLeft)[i] op(pRight)[0]; \ + (out) += step; \ + } \ + } \ + } + +#define ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ + { \ + int32_t i = (_ord == TSQL_SO_ASC) ? 0 : MAX(len1, len2) - 1; \ + int32_t step = (_ord == TSQL_SO_ASC) ? 
1 : -1; \ + \ + if (len1 == (len2)) { \ + for (; i >= 0 && i < (len2); i += step) { \ + if (isNull((char *)&left[i], _left_type) || isNull((char *)&right[i], _right_type)) { \ + setNull((char *)&out[i], _res_type, tDataTypeDesc[_res_type].nSize); \ + continue; \ + } \ + *(out) = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i]; \ + (out) += step; \ + } \ + } else if (len1 == 1) { \ + for (; i >= 0 && i < (len2); i += step) { \ + if (isNull((char *)&left[i], _left_type) || isNull((char *)&right[i], _right_type)) { \ + setNull((char *)&out[i], _res_type, tDataTypeDesc[_res_type].nSize); \ + continue; \ + } \ + *(out) = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i]; \ + (out) += step; \ + } \ + } else if ((len2) == 1) { \ + for (; i >= 0 && i < len1; i += step) { \ + if (isNull((char *)&left[i], _left_type) || isNull((char *)&right[i], _right_type)) { \ + setNull((char *)&out[i], _res_type, tDataTypeDesc[_res_type].nSize); \ + continue; \ + } \ + *(out) = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[0])) * pRight[0]; \ + (out) += step; \ + } \ + } \ + } + +#define ARRAY_LIST_ADD(left, right, _left_type, _right_type, len1, len2, out, _ord) \ + ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, +, TSDB_DATA_TYPE_DOUBLE, _ord) +#define ARRAY_LIST_SUB(left, right, _left_type, _right_type, len1, len2, out, _ord) \ + ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, -, TSDB_DATA_TYPE_DOUBLE, _ord) +#define ARRAY_LIST_MULTI(left, right, _left_type, _right_type, len1, len2, out, _ord) \ + ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, *, TSDB_DATA_TYPE_DOUBLE, _ord) +#define ARRAY_LIST_DIV(left, right, _left_type, _right_type, len1, len2, out, _ord) \ + ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord) +#define ARRAY_LIST_REM(left, right, _left_type, _right_type, len1, len2, out, _ord) \ + ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, %, TSDB_DATA_TYPE_DOUBLE, _ord) + +#define TYPE_CONVERT_DOUBLE_RES(left, right, out, _type_left, _type_right, _type_res) \ + _type_left * pLeft = (_type_left *)(left); \ + _type_right *pRight = (_type_right *)(right); \ + _type_res * pOutput = (_type_res *)(out); + +#define TYPE_CONVERT(left, right, out, _type_left, _type_right) \ + TYPE_CONVERT_DOUBLE_RES(left, right, out, _type_left, _type_right, double) + +void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + int32_t *pLeft = (int32_t *)left; + int32_t *pRight = (int32_t *)right; + double * pOutput = (double *)output; + + int32_t i = (order == TSQL_SO_ASC) ? 0 : MAX(numLeft, numRight) - 1; + int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; + + if (numLeft == numRight) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + + *pOutput = (double)pLeft[i] + pRight[i]; + pOutput += step; + } + } else if (numLeft == 1) { + for (; i >= 0 && i < numRight; ++i) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + + *pOutput = (double)pLeft[0] + pRight[i]; + pOutput += step; + } + } else if (numRight == 1) { + for (; i >= 0 && i < numLeft; ++i) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] + pRight[0]; + pOutput += step; + } + } +} + +void calc_fn_i32_i8_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int8_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i16_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int16_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i64_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int64_t); + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_f_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, float) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_d_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, double) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i8_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int8_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i16_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int16_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_i8_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i8_i64_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int64_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_BIGINT, numLeft, 
numRight, pOutput, order); +} + +void calc_fn_i8_f_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, float) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_d_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, double) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i8_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_i16_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i16_i16_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int16_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_i16_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i16_i64_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int64_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_f_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, float) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_d_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, double) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i8_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_i64_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i64_i16_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i16_i64_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i64_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_i64_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i64_i64_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int64_t) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_f_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, float) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_d_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, double) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_DOUBLE, numLeft, 
numRight, pOutput, order); +} + +void calc_fn_f_i8_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_f_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_i16_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i16_f_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_f_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_i64_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i64_f_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_f_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, float) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_d_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, double) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i8_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_d_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_i16_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i16_d_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_i32_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_d_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_i64_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i64_d_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_f_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_f_d_add(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_d_add(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, double) + ARRAY_LIST_ADD(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////// +void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + int32_t *pLeft = (int32_t *)left; + int32_t *pRight = (int32_t *)right; + double * pOutput = (double *)output; + + int32_t i = (order == TSQL_SO_ASC) ? 0 : MAX(numLeft, numRight) - 1; + int32_t step = (order == TSQL_SO_ASC) ? 
1 : -1; + + if (numLeft == numRight) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] - pRight[i]; + pOutput += step; + } + } else if (numLeft == 1) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[0] - pRight[i]; + pOutput += step; + } + } else if (numRight == 1) { + for (; i >= 0 && i < numLeft; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] - pRight[0]; + pOutput += step; + } + } +} + +void calc_fn_i32_i8_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int8_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i16_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int16_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i64_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int64_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_f_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, float) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_d_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, double) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i8_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int8_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i16_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int16_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int32_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i64_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, 
int64_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_f_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, float) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_d_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, double) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i8_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int8_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i16_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int16_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int32_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i64_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int64_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_f_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, float) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_d_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, double) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i8_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int8_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i16_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int16_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int32_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i64_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int64_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, 
TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_f_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, float) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_d_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, double) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i8_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int8_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i16_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int16_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int32_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i64_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int64_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_f_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, float) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_d_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, double) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i8_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int8_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i16_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int16_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int32_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i64_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int64_t) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_f_sub(void *left, void *right, int32_t numLeft, int32_t 
numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, float) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_d_sub(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, double) + ARRAY_LIST_SUB(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////// +void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + int32_t *pLeft = (int32_t *)left; + int32_t *pRight = (int32_t *)right; + double * pOutput = (double *)output; + + int32_t i = (order == TSQL_SO_ASC) ? 0 : MAX(numLeft, numRight) - 1; + int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; + + if (numLeft == numRight) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] * pRight[i]; + pOutput += step; + } + } else if (numLeft == 1) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[0] * pRight[i]; + pOutput += step; + } + } else if (numRight == 1) { + for (; i >= 0 && i < numLeft; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] * pRight[0]; + pOutput += step; + } + } +} + +void calc_fn_i32_i8_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int8_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i16_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int16_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i64_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int64_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_f_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, float) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_d_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, double) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i8_multi(void *left, void *right, 
int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int8_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i16_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int16_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i32_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_i8_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i8_i64_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int64_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_f_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, float) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_d_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, double) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i8_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_i16_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i16_i16_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int16_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i32_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_i16_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i16_i64_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int64_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_f_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, float) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_d_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, double) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i8_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_i64_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i64_i16_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i16_i64_multi(right, left, numRight, numLeft, output, order); 
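+  // NOTE: addition and multiplication are commutative, so mixed-type variants such as this one
+  // simply swap the operands (and their lengths) and reuse the mirrored function; the
+  // subtraction, division and remainder families spell out every type combination explicitly.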
+} + +void calc_fn_i64_i32_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_i64_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_i64_i64_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int64_t) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_f_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, float) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_d_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, double) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i8_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_f_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_i16_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i16_f_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_i32_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_f_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_i64_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i64_f_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_f_f_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, float) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_d_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, double) + ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i8_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i8_d_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_i16_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i16_d_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_i32_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i32_d_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_i64_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_i64_d_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_f_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + calc_fn_f_d_multi(right, left, numRight, numLeft, output, order); +} + +void calc_fn_d_d_multi(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, double) + 
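+  // TYPE_CONVERT is shorthand for TYPE_CONVERT_DOUBLE_RES: it declares the typed views
+  //   double *pLeft = (double *)left; double *pRight = (double *)right; double *pOutput = (double *)output;
+  // and the ARRAY_LIST_* macro that follows expands to essentially the same order-aware,
+  // NULL-checking loop written out by hand in calc_fn_i32_i32_multi, here with '*' as the operator.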
ARRAY_LIST_MULTI(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////// +void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + int32_t *pLeft = (int32_t *)left; + int32_t *pRight = (int32_t *)right; + double * pOutput = (double *)output; + + int32_t i = (order == TSQL_SO_ASC) ? 0 : MAX(numLeft, numRight) - 1; + int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; + + if (numLeft == numRight) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] / pRight[i]; + pOutput += step; + } + } else if (numLeft == 1) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[0] / pRight[i]; + pOutput += step; + } + } else if (numRight == 1) { + for (; i >= 0 && i < numLeft; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] / pRight[0]; + pOutput += step; + } + } +} + +void calc_fn_i32_i8_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int8_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i16_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int16_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i64_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int64_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_f_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, float) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_d_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, double) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i8_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int8_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i16_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, 
int16_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i32_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int32_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i64_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int64_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_f_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, float) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_d_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, double) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i8_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int8_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i16_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int16_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i32_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int32_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i64_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int64_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_f_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, float) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_d_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, double) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i8_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int8_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i16_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int16_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, 
TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i32_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int32_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i64_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int64_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_f_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, float) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_d_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, double) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i8_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int8_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i16_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int16_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i32_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int32_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i64_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int64_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_f_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, float) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_d_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, double) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i8_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int8_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i16_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int16_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i32_div(void *left, void *right, int32_t 
numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int32_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i64_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int64_t) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_f_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, float) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_d_div(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, double) + ARRAY_LIST_DIV(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////// + +void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + int32_t *pLeft = (int32_t *)left; + int32_t *pRight = (int32_t *)right; + double * pOutput = (double *)output; + + int32_t i = (order == TSQL_SO_ASC) ? 0 : MAX(numLeft, numRight) - 1; + int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; + + if (numLeft == numRight) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + + *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i]; + pOutput += step; + } + } else if (numLeft == 1) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i]; + pOutput += step; + } + } else if (numRight == 1) { + for (; i >= 0 && i < numLeft; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_INT)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[0])) * pRight[0]; + pOutput += step; + } + } +} + +void calc_fn_i32_i8_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int8_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i16_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int16_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_i64_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, int64_t) + ARRAY_LIST_REM(pLeft, 
pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_f_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int32_t, float) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + int32_t *pLeft = (int32_t *)left; + double * pRight = (double *)right; + double * pOutput = (double *)output; + + int32_t i = (order == TSQL_SO_ASC) ? 0 : MAX(numLeft, numRight) - 1; + int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; + + if (numLeft == numRight) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_DOUBLE)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i]; + pOutput += step; + } + } else if (numLeft == 1) { + for (; i >= 0 && i < numRight; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_DOUBLE)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i]; + pOutput += step; + } + } else if (numRight == 1) { + for (; i >= 0 && i < numLeft; i += step) { + if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)&pRight[i], TSDB_DATA_TYPE_DOUBLE)) { + setNull((char *)&pOutput[i], TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + continue; + } + *pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[0])) * pRight[0]; + pOutput += step; + } + } +} + +void calc_fn_i8_i8_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int8_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i16_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int16_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int32_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_i64_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, int64_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_f_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int8_t, float) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i8_d_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, 
int8_t, double) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i8_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int8_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i16_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int16_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int32_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_i64_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, int64_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_f_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, float) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i16_d_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int16_t, double) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i8_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int8_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i16_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int16_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int32_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_i64_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, int64_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_f_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, float) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_i64_d_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, int64_t, double) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_BIGINT, 
TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i8_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int8_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i16_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int16_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int32_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_i64_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, int64_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_f_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, float) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_f_d_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, float, double) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i8_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int8_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_TINYINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i16_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int16_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_SMALLINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int32_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_INT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_i64_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, int64_t) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_BIGINT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_f_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, float) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_FLOAT, numLeft, numRight, pOutput, order); +} + +void calc_fn_d_d_rem(void *left, void *right, int32_t numLeft, int32_t numRight, void *output, int32_t order) { + TYPE_CONVERT(left, right, output, double, double) + ARRAY_LIST_REM(pLeft, pRight, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_DOUBLE, numLeft, numRight, pOutput, order); +} + +/* + * the following are two-dimensional array list of callback function . 
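+ * Each table is indexed as table[leftType][rightType], using the numeric TSDB data type codes in the
+ * column order shown in the comment row of every table; NULL entries mean the operand combination
+ * (e.g. bool, timestamp or binary) is not supported, and the output column is always double.
+ * Illustrative lookup/dispatch sketch (the buffer names are placeholders, not part of this file):
+ *   _bi_consumer_fn_t fn = tGetBiConsumerFn(TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_FLOAT, TSDB_BINARY_OP_ADD);
+ *   if (fn != NULL) fn(intColumn, floatColumn, numOfRows, numOfRows, resultColumn, TSQL_SO_ASC);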
+ */ +_bi_consumer_fn_t add_function_arraylist[8][10] = { + /*NULL, bool, tinyint, smallint, int, bigint, float, double, timestamp, + binary*/ + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // EMPTY, + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // TSDB_DATA_TYPE_BOOL, + {NULL, NULL, calc_fn_i8_i8_add, calc_fn_i8_i16_add, calc_fn_i8_i32_add, calc_fn_i8_i64_add, calc_fn_i8_f_add, + calc_fn_i8_d_add, NULL, NULL}, // TSDB_DATA_TYPE_TINYINT + {NULL, NULL, calc_fn_i16_i8_add, calc_fn_i16_i16_add, calc_fn_i16_i32_add, calc_fn_i16_i64_add, calc_fn_i16_f_add, + calc_fn_i16_d_add, NULL, NULL}, // TSDB_DATA_TYPE_SMALLINT + {NULL, NULL, calc_fn_i32_i8_add, calc_fn_i32_i16_add, calc_fn_i32_i32_add, calc_fn_i32_i64_add, calc_fn_i32_f_add, + calc_fn_i32_d_add, NULL, NULL}, // TSDB_DATA_TYPE_INT + {NULL, NULL, calc_fn_i64_i8_add, calc_fn_i64_i16_add, calc_fn_i64_i32_add, calc_fn_i64_i64_add, calc_fn_i64_f_add, + calc_fn_i64_d_add, NULL, NULL}, // TSDB_DATA_TYPE_BIGINT + {NULL, NULL, calc_fn_f_i8_add, calc_fn_f_i16_add, calc_fn_f_i32_add, calc_fn_f_i64_add, calc_fn_f_f_add, + calc_fn_f_d_add, NULL, NULL}, // TSDB_DATA_TYPE_FLOAT + {NULL, NULL, calc_fn_d_i8_add, calc_fn_d_i16_add, calc_fn_d_i32_add, calc_fn_d_i64_add, calc_fn_d_f_add, + calc_fn_d_d_add, NULL, NULL}, // TSDB_DATA_TYPE_DOUBLE +}; + +_bi_consumer_fn_t sub_function_arraylist[8][10] = { + /*NULL, bool, tinyint, smallint, int, bigint, float, double, timestamp, + binary*/ + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // EMPTY, + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // TSDB_DATA_TYPE_BOOL, + {NULL, NULL, calc_fn_i8_i8_sub, calc_fn_i8_i16_sub, calc_fn_i8_i32_sub, calc_fn_i8_i64_sub, calc_fn_i8_f_sub, + calc_fn_i8_d_sub, NULL, NULL}, // TSDB_DATA_TYPE_TINYINT + {NULL, NULL, calc_fn_i16_i8_sub, calc_fn_i16_i16_sub, calc_fn_i16_i32_sub, calc_fn_i16_i64_sub, calc_fn_i16_f_sub, + calc_fn_i16_d_sub, NULL, NULL}, // TSDB_DATA_TYPE_SMALLINT + {NULL, NULL, calc_fn_i32_i8_sub, calc_fn_i32_i16_sub, calc_fn_i32_i32_sub, calc_fn_i32_i64_sub, calc_fn_i32_f_sub, + calc_fn_i32_d_sub, NULL, NULL}, // TSDB_DATA_TYPE_INT + {NULL, NULL, calc_fn_i64_i8_sub, calc_fn_i64_i16_sub, calc_fn_i64_i32_sub, calc_fn_i64_i64_sub, calc_fn_i64_f_sub, + calc_fn_i64_d_sub, NULL, NULL}, // TSDB_DATA_TYPE_BIGINT + {NULL, NULL, calc_fn_f_i8_sub, calc_fn_f_i16_sub, calc_fn_f_i32_sub, calc_fn_f_i64_sub, calc_fn_f_f_sub, + calc_fn_f_d_sub, NULL, NULL}, // TSDB_DATA_TYPE_FLOAT + {NULL, NULL, calc_fn_d_i8_sub, calc_fn_d_i16_sub, calc_fn_d_i32_sub, calc_fn_d_i64_sub, calc_fn_d_f_sub, + calc_fn_d_d_sub, NULL, NULL}, // TSDB_DATA_TYPE_DOUBLE +}; + +_bi_consumer_fn_t multi_function_arraylist[][10] = { + /*NULL, bool, tinyint, smallint, int, bigint, float, double, timestamp, + binary*/ + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // EMPTY, + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // TSDB_DATA_TYPE_BOOL, + {NULL, NULL, calc_fn_i8_i8_multi, calc_fn_i8_i16_multi, calc_fn_i8_i32_multi, calc_fn_i8_i64_multi, + calc_fn_i8_f_multi, calc_fn_i8_d_multi, NULL, NULL}, // TSDB_DATA_TYPE_TINYINT + {NULL, NULL, calc_fn_i16_i8_multi, calc_fn_i16_i16_multi, calc_fn_i16_i32_multi, calc_fn_i16_i64_multi, + calc_fn_i16_f_multi, calc_fn_i16_d_multi, NULL, NULL}, // TSDB_DATA_TYPE_SMALLINT + {NULL, NULL, calc_fn_i32_i8_multi, calc_fn_i32_i16_multi, calc_fn_i32_i32_multi, calc_fn_i32_i64_multi, + calc_fn_i32_f_multi, calc_fn_i32_d_multi, NULL, NULL}, // TSDB_DATA_TYPE_INT + {NULL, NULL, 
calc_fn_i64_i8_multi, calc_fn_i64_i16_multi, calc_fn_i64_i32_multi, calc_fn_i64_i64_multi, + calc_fn_i64_f_multi, calc_fn_i64_d_multi, NULL, NULL}, // TSDB_DATA_TYPE_BIGINT + {NULL, NULL, calc_fn_f_i8_multi, calc_fn_f_i16_multi, calc_fn_f_i32_multi, calc_fn_f_i64_multi, calc_fn_f_f_multi, + calc_fn_f_d_multi, NULL, NULL}, // TSDB_DATA_TYPE_FLOAT + {NULL, NULL, calc_fn_d_i8_multi, calc_fn_d_i16_multi, calc_fn_d_i32_multi, calc_fn_d_i64_multi, calc_fn_d_f_multi, + calc_fn_d_d_multi, NULL, NULL}, // TSDB_DATA_TYPE_DOUBLE +}; + +_bi_consumer_fn_t div_function_arraylist[8][10] = { + /*NULL, bool, tinyint, smallint, int, bigint, float, double, timestamp, + binary*/ + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // EMPTY, + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // TSDB_DATA_TYPE_BOOL, + {NULL, NULL, calc_fn_i8_i8_div, calc_fn_i8_i16_div, calc_fn_i8_i32_div, calc_fn_i8_i64_div, calc_fn_i8_f_div, + calc_fn_i8_d_div, NULL, NULL}, // TSDB_DATA_TYPE_TINYINT + {NULL, NULL, calc_fn_i16_i8_div, calc_fn_i16_i16_div, calc_fn_i16_i32_div, calc_fn_i16_i64_div, calc_fn_i16_f_div, + calc_fn_i16_d_div, NULL, NULL}, // TSDB_DATA_TYPE_SMALLINT + {NULL, NULL, calc_fn_i32_i8_div, calc_fn_i32_i16_div, calc_fn_i32_i32_div, calc_fn_i32_i64_div, calc_fn_i32_f_div, + calc_fn_i32_d_div, NULL, NULL}, // TSDB_DATA_TYPE_INT + {NULL, NULL, calc_fn_i64_i8_div, calc_fn_i64_i16_div, calc_fn_i64_i32_div, calc_fn_i64_i64_div, calc_fn_i64_f_div, + calc_fn_i64_d_div, NULL, NULL}, // TSDB_DATA_TYPE_BIGINT + {NULL, NULL, calc_fn_f_i8_div, calc_fn_f_i16_div, calc_fn_f_i32_div, calc_fn_f_i64_div, calc_fn_f_f_div, + calc_fn_f_d_div, NULL, NULL}, // TSDB_DATA_TYPE_FLOAT + {NULL, NULL, calc_fn_d_i8_div, calc_fn_d_i16_div, calc_fn_d_i32_div, calc_fn_d_i64_div, calc_fn_d_f_div, + calc_fn_d_d_div, NULL, NULL}, // TSDB_DATA_TYPE_DOUBLE +}; + +_bi_consumer_fn_t rem_function_arraylist[8][10] = { + /*NULL, bool, tinyint, smallint, int, bigint, float, double, timestamp, + binary*/ + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // EMPTY, + {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}, // TSDB_DATA_TYPE_BOOL, + {NULL, NULL, calc_fn_i8_i8_rem, calc_fn_i8_i16_rem, calc_fn_i8_i32_rem, calc_fn_i8_i64_rem, calc_fn_i8_f_rem, + calc_fn_i8_d_rem, NULL, NULL}, // TSDB_DATA_TYPE_TINYINT + {NULL, NULL, calc_fn_i16_i8_rem, calc_fn_i16_i16_rem, calc_fn_i16_i32_rem, calc_fn_i16_i64_rem, calc_fn_i16_f_rem, + calc_fn_i16_d_rem, NULL, NULL}, // TSDB_DATA_TYPE_SMALLINT + {NULL, NULL, calc_fn_i32_i8_rem, calc_fn_i32_i16_rem, calc_fn_i32_i32_rem, calc_fn_i32_i64_rem, calc_fn_i32_f_rem, + calc_fn_i32_d_rem, NULL, NULL}, // TSDB_DATA_TYPE_INT + {NULL, NULL, calc_fn_i64_i8_rem, calc_fn_i64_i16_rem, calc_fn_i64_i32_rem, calc_fn_i64_i64_rem, calc_fn_i64_f_rem, + calc_fn_i64_d_rem, NULL, NULL}, // TSDB_DATA_TYPE_BIGINT + {NULL, NULL, calc_fn_f_i8_rem, calc_fn_f_i16_rem, calc_fn_f_i32_rem, calc_fn_f_i64_rem, calc_fn_f_f_rem, + calc_fn_f_d_rem, NULL, NULL}, // TSDB_DATA_TYPE_FLOAT + {NULL, NULL, calc_fn_d_i8_rem, calc_fn_d_i16_rem, calc_fn_d_i32_rem, calc_fn_d_i64_rem, calc_fn_d_f_rem, + calc_fn_d_d_rem, NULL, NULL}, // TSDB_DATA_TYPE_DOUBLE +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////// + +_bi_consumer_fn_t tGetBiConsumerFn(int32_t leftType, int32_t rightType, int32_t optr) { + switch (optr) { + case TSDB_BINARY_OP_ADD: + return add_function_arraylist[leftType][rightType]; + case TSDB_BINARY_OP_SUBTRACT: + return 
sub_function_arraylist[leftType][rightType]; + case TSDB_BINARY_OP_MULTIPLY: + return multi_function_arraylist[leftType][rightType]; + case TSDB_BINARY_OP_DIVIDE: + return div_function_arraylist[leftType][rightType]; + case TSDB_BINARY_OP_REMAINDER: + return rem_function_arraylist[leftType][rightType]; + default: + return NULL; + } + return NULL; +} diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c new file mode 100644 index 000000000000..47f1f0da4590 --- /dev/null +++ b/src/client/src/tscSystem.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tcache.h" +#include "tlog.h" +#include "trpc.h" +#include "tsdb.h" +#include "tsocket.h" +#include "tsystem.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +#include "tsclient.h" +// global, not configurable +void * pVnodeConn; +void * pVMeterConn; +void * pTscMgmtConn; +void * pSlaveConn; +void * tscCacheHandle; +uint8_t globalCode = 0; +int initialized = 0; +int slaveIndex; +void * tscTmr; +void * tscQhandle; +void * tscConnCache; +int tsInsertHeadSize; + +extern int tscEmbedded; +int tscNumOfThreads; +static pthread_once_t tscinit = PTHREAD_ONCE_INIT; + +void taos_init_imp() { + char temp[128]; + struct stat dirstat; + SRpcInit rpcInit; + + srand(taosGetTimestampSec()); + + if (tscEmbedded == 0) { + /* + * set localIp = 0 + * means unset tsLocalIp in client + * except read from config file + */ + strcpy(tsLocalIp, "0.0.0.0"); + + // Read global configuration. 
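+ // The log-related settings are read first so the log directory can be created
+ // and the log file opened before the remaining configuration is loaded below.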
+ tsReadGlobalLogConfig(); + + // For log directory + if (stat(logDir, &dirstat) < 0) mkdir(logDir, 0755); + + sprintf(temp, "%s/taoslog", logDir); + if (taosInitLog(temp, tsNumOfLogLines, 10) < 0) { + printf("failed to open log file:%s", temp); + } + + tsReadGlobalConfig(); + tsPrintGlobalConfig(); + + tscTrace("starting to initialize TAOS client ..."); + tscTrace("Local IP address is:%s", tsLocalIp); + } + + tscInitMsgs(); + slaveIndex = rand(); + int queueSize = tsMaxVnodeConnections + tsMaxMeterConnections + tsMaxMgmtConnections + tsMaxMgmtConnections; + + if (tscEmbedded == 0) { + tscNumOfThreads = tsNumOfCores * tsNumOfThreadsPerCore / 2.0; + } else { + tscNumOfThreads = tsNumOfCores * tsNumOfThreadsPerCore / 4.0; + } + + if (tscNumOfThreads < 2) tscNumOfThreads = 2; + + tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc"); + + memset(&rpcInit, 0, sizeof(rpcInit)); + rpcInit.localIp = tsLocalIp; + rpcInit.localPort = 0; + rpcInit.label = "TSC-vnode"; + rpcInit.numOfThreads = tscNumOfThreads; + rpcInit.fp = tscProcessMsgFromServer; + rpcInit.bits = 20; + rpcInit.numOfChanns = tscNumOfThreads; + rpcInit.sessionsPerChann = tsMaxVnodeConnections / tscNumOfThreads; + rpcInit.idMgmt = TAOS_ID_FREE; + rpcInit.noFree = 1; + rpcInit.connType = TAOS_CONN_UDP; + rpcInit.qhandle = tscQhandle; + pVnodeConn = taosOpenRpc(&rpcInit); + if (pVnodeConn == NULL) { + tscError("failed to init connection to vnode"); + return; + } + + for (int i = 0; i < tscNumOfThreads; ++i) taosOpenRpcChann(pVnodeConn, i, rpcInit.sessionsPerChann); + + memset(&rpcInit, 0, sizeof(rpcInit)); + rpcInit.localIp = tsLocalIp; + rpcInit.localPort = 0; + rpcInit.label = "TSC-mgmt"; + rpcInit.numOfThreads = 1; + rpcInit.fp = tscProcessMsgFromServer; + rpcInit.bits = 20; + rpcInit.numOfChanns = 1; + rpcInit.sessionsPerChann = tsMaxMgmtConnections; + rpcInit.idMgmt = TAOS_ID_FREE; + rpcInit.noFree = 1; + rpcInit.connType = TAOS_CONN_UDP; + rpcInit.qhandle = tscQhandle; + pTscMgmtConn = taosOpenRpc(&rpcInit); + if (pTscMgmtConn == NULL) { + tscError("failed to init connection to mgmt"); + return; + } + + tscTmr = taosTmrInit(tsMaxMgmtConnections * 2, 200, 60000, "TSC"); + + int64_t refreshTime = tsMetricMetaKeepTimer < tsMeterMetaKeepTimer ? tsMetricMetaKeepTimer : tsMeterMetaKeepTimer; + refreshTime = refreshTime > 2 ? 2 : refreshTime; + refreshTime = refreshTime < 1 ? 1 : refreshTime; + + if (tscCacheHandle == NULL) tscCacheHandle = taosInitDataCache(tsMaxMeterConnections / 2, tscTmr, refreshTime); + + tscConnCache = taosOpenConnCache(tsMaxMeterConnections * 2, taosCloseRpcConn, tscTmr, tsShellActivityTimer * 1000); + + initialized = 1; + tscTrace("taos client is initialized successfully"); + tsInsertHeadSize = tsRpcHeadSize + sizeof(SShellSubmitMsg); +} + +void taos_init() { pthread_once(&tscinit, taos_init_imp); } + +int taos_options(TSDB_OPTION option, const void *arg, ...) 
{ + char * pStr = NULL; + SGlobalConfig *cfg_configDir = tsGetConfigOption("configDir"); + SGlobalConfig *cfg_activetimer = tsGetConfigOption("shellActivityTimer"); + SGlobalConfig *cfg_locale = tsGetConfigOption("locale"); + SGlobalConfig *cfg_charset = tsGetConfigOption("charset"); + SGlobalConfig *cfg_timezone = tsGetConfigOption("timezone"); + + switch (option) { + case TSDB_OPTION_CONFIGDIR: + pStr = (char *)arg; + if (cfg_configDir && cfg_configDir->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + strncpy(configDir, pStr, TSDB_FILENAME_LEN); + cfg_configDir->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + tscPrint("set config file directory:%s", pStr); + } else { + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_configDir->option, pStr, + tsCfgStatusStr[cfg_configDir->cfgStatus], (char *)cfg_configDir->ptr); + } + break; + case TSDB_OPTION_SHELL_ACTIVITY_TIMER: + if (cfg_activetimer && cfg_activetimer->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + tsShellActivityTimer = atoi((char *)arg); + if (tsShellActivityTimer < 1) tsShellActivityTimer = 1; + if (tsShellActivityTimer > 3600) tsShellActivityTimer = 3600; + cfg_activetimer->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + tscPrint("set shellActivityTimer:%d", tsShellActivityTimer); + } else { + tscWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg_activetimer->option, pStr, + tsCfgStatusStr[cfg_activetimer->cfgStatus], (int32_t *)cfg_activetimer->ptr); + } + break; + case TSDB_OPTION_LOCALE: { // set locale + pStr = (char *)arg; + + size_t len = strlen(pStr); + if (len == 0 || len > TSDB_LOCALE_LEN) { + tscPrint("Invalid locale:%s, use default", pStr); + return -1; + } + + if (cfg_locale && cfg_charset && cfg_locale->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + char sep = '.'; + char oldLocale[64] = {0}; + strncpy(oldLocale, tsLocale, sizeof(oldLocale) / sizeof(oldLocale[0])); + + char *locale = setlocale(LC_CTYPE, pStr); + + if (locale != NULL) { + tscPrint("locale set, prev locale:%s, new locale:%s", oldLocale, locale); + cfg_locale->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + } else { + /* set the user-specified localed failed, use default LC_CTYPE as + * current locale */ + locale = setlocale(LC_CTYPE, oldLocale); + tscPrint("failed to set locale:%s, restore locale:%s", pStr, oldLocale); + } + + strncpy(tsLocale, locale, sizeof(tsLocale) / sizeof(tsLocale[0])); + + char *charset = strrchr(tsLocale, sep); + if (charset != NULL) { + charset += 1; + + charset = taosCharsetReplace(charset); + + if (taosValidateEncodec(charset)) { + tscPrint("charset changed from %s to %s", tsCharset, charset); + strncpy(tsCharset, charset, tListLen(tsCharset)); + cfg_charset->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + ; + } else { + tscPrint("charset:%s is not valid in locale, charset remains:%s", charset, tsCharset); + } + free(charset); + } else { + tscPrint("charset remains:%s", tsCharset); + } + } else { + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_locale->option, pStr, + tsCfgStatusStr[cfg_locale->cfgStatus], (char *)cfg_locale->ptr); + } + break; + } + + case TSDB_OPTION_CHARSET: { + /* set charset will override the value of charset, assigned during system locale changed */ + pStr = (char *)arg; + + char oldCharset[64] = {0}; + strncpy(oldCharset, tsCharset, tListLen(oldCharset)); + + size_t len = strlen(pStr); + if (len == 0 || len > TSDB_LOCALE_LEN) { + tscPrint("Invalid charset:%s, failed to set charset, current charset:%s", pStr, oldCharset); + return -1; + } + + if (cfg_charset && 
cfg_charset->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + if (taosValidateEncodec(pStr)) { + tscPrint("charset changed from %s to %s", tsCharset, pStr); + strncpy(tsCharset, pStr, tListLen(tsCharset)); + cfg_charset->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + } else { + tscPrint("charset:%s is not valid, charset remains:%s", pStr, tsCharset); + } + } else { + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_charset->option, pStr, + tsCfgStatusStr[cfg_charset->cfgStatus], (char *)cfg_charset->ptr); + } + + break; + } + + case TSDB_OPTION_TIMEZONE: + pStr = (char *)arg; + if (cfg_timezone && cfg_timezone->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) { + strcpy(tsTimezone, pStr); + tsSetTimeZone(); + cfg_timezone->cfgStatus = TSDB_CFG_CSTATUS_OPTION; + tscTrace("timezone set:%s, input:%s by taos_options", tsTimezone, pStr); + } else { + tscWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg_timezone->option, pStr, + tsCfgStatusStr[cfg_timezone->cfgStatus], (char *)cfg_timezone->ptr); + } + break; + default: + tscError("Invalid option %d", option); + return -1; + } + + return 0; +} diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c new file mode 100644 index 000000000000..fb3113bcb5bb --- /dev/null +++ b/src/client/src/tscUtil.c @@ -0,0 +1,1057 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tcache.h" +#include "tkey.h" +#include "tmd5.h" +#include "tscProfile.h" +#include "tscSecondaryMerge.h" +#include "tscUtil.h" +#include "tschemautil.h" +#include "tsclient.h" +#include "tsqldef.h" +#include "ttimer.h" + +/* + * the detailed information regarding metric meta key is: + * fullmetername + '.' + querycond + '.' + [tagId1, tagId2,...] + '.' + group_orderType + '.' + limit + '.' + offset + * if querycond is null, its format is: + * fullmetername + '.' + '(nil)' + '.' + [tagId1, tagId2,...] + '.' + group_orderType + '.' + limit + '.' 
+ offset + */ +void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr) { + char* pTagCondStr = NULL; + const int32_t RESERVED_SIZE = 100; + + char tagIdBuf[128] = {0}; + int32_t offset = 0; + for (int32_t i = 0; i < pCmd->numOfReqTags; ++i) { + offset += sprintf(&tagIdBuf[offset], "%d,", pCmd->tagColumnIndex[i]); + } + + assert(offset < tListLen(tagIdBuf)); + size_t len = strlen(pCmd->name); + + /* for too long key, we use the md5 to generated the key for local cache */ + if (pCmd->tagCond.len >= TSDB_MAX_TAGS_LEN - RESERVED_SIZE - offset) { + MD5_CTX ctx; + MD5Init(&ctx); + MD5Update(&ctx, (uint8_t*)tsGetMetricQueryCondPos(&pCmd->tagCond), pCmd->tagCond.len); + MD5Final(&ctx); + + pTagCondStr = base64_encode(ctx.digest, tListLen(ctx.digest)); + } else if (pCmd->tagCond.len + len + offset <= TSDB_MAX_TAGS_LEN && pCmd->tagCond.len > 0) { + pTagCondStr = strdup(tsGetMetricQueryCondPos(&pCmd->tagCond)); + } + + int32_t keyLen = sprintf(keyStr, "%s.%s.[%s].%d.%lld.%lld", pCmd->name, pTagCondStr, tagIdBuf, + pCmd->groupbyExpr.orderType, pCmd->glimit.limit, pCmd->glimit.offset); + + free(pTagCondStr); + assert(keyLen <= TSDB_MAX_TAGS_LEN); +} + +char* tsGetMetricQueryCondPos(STagCond* pTagCond) { return pTagCond->pData; } + +bool tscQueryOnMetric(SSqlCmd* pCmd) { return UTIL_METER_IS_METRIC(pCmd) && pCmd->msgType == TSDB_MSG_TYPE_QUERY; } + +void tscGetDBInfoFromMeterId(char* meterId, char* db) { + char* st = strstr(meterId, TS_PATH_DELIMITER); + if (st != NULL) { + char* end = strstr(st + 1, TS_PATH_DELIMITER); + if (end != NULL) { + memcpy(db, meterId, (end - meterId)); + db[end - meterId] = 0; + return; + } + } + + db[0] = 0; +} + +SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx) { + if (pMetricmeta == NULL) { + tscError("illegal metricmeta"); + return 0; + } + + if (pMetricmeta->numOfVnodes == 0 || pMetricmeta->numOfMeters == 0) { + return 0; + } + + if (vnodeIdx < 0 || vnodeIdx >= pMetricmeta->numOfVnodes) { + int32_t vnodeRange = (pMetricmeta->numOfVnodes > 0) ? (pMetricmeta->numOfVnodes - 1) : 0; + tscError("illegal vnodeIdx:%d, reset to 0, vnodeIdx range:%d-%d", vnodeIdx, 0, vnodeRange); + + vnodeIdx = 0; + } + + return (SVnodeSidList*)(pMetricmeta->list[vnodeIdx] + (char*)pMetricmeta); +} + +SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx) { + if (pSidList == NULL) { + tscError("illegal sidlist"); + return 0; + } + + if (idx < 0 || idx >= pSidList->numOfSids) { + int32_t sidRange = (pSidList->numOfSids > 0) ? (pSidList->numOfSids - 1) : 0; + + tscError("illegal sidIdx:%d, reset to 0, sidIdx range:%d-%d", idx, 0, sidRange); + idx = 0; + } + return (SMeterSidExtInfo*)(pSidList->pSidExtInfoList[idx] + (char*)pSidList); +} + +bool tscIsTwoStageMergeMetricQuery(SSqlObj* pSql) { + assert(pSql != NULL); + + SSqlCmd* pCmd = &pSql->cmd; + if (pCmd->pMeterMeta == NULL) { + return false; + } + + if (pCmd->vnodeIdx == 0 && pCmd->command == TSDB_SQL_SELECT && (tscSqlExprGet(pCmd, 0)->sqlFuncId != TSDB_FUNC_PRJ)) { + return UTIL_METER_IS_METRIC(pCmd); + } + + return false; +} + +bool tscProjectionQueryOnMetric(SSqlObj* pSql) { + assert(pSql != NULL); + + SSqlCmd* pCmd = &pSql->cmd; + + /* + * In following cases, return false for project query on metric + * 1. failed to get metermeta from server; 2. not a metric; 3. limit 0; 4. 
+ * show query, instead of a select query + */ + if (pCmd->pMeterMeta == NULL || !UTIL_METER_IS_METRIC(pCmd) || pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || + pCmd->exprsInfo.numOfExprs == 0) { + return false; + } + + /* + * Note:if there is COLPRJ_FUNCTION, only TAGPRJ_FUNCTION is allowed simultaneous + * for interp query, the query routine will action the same as projection query on metric + */ + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, i); + if (pExpr->sqlFuncId == TSDB_FUNC_PRJ) { + return true; + } + } + + return false; +} + +bool tscIsPointInterpQuery(SSqlCmd* pCmd) { + for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + if (pExpr == NULL) { + return false; + } + + int32_t functionId = pExpr->sqlFuncId; + if (functionId == TSDB_FUNC_TAG) { + continue; + } + + if (functionId != TSDB_FUNC_INTERP) { + return false; + } + } + + return true; +} + +bool tscIsFirstProjQueryOnMetric(SSqlObj* pSql) { + return (tscProjectionQueryOnMetric(pSql) && (pSql->cmd.vnodeIdx == 0)); +} + +void tscClearInterpInfo(SSqlCmd* pCmd) { + if (!tscIsPointInterpQuery(pCmd)) { + return; + } + + pCmd->interpoType = TSDB_INTERPO_NONE; + memset(pCmd->defaultVal, 0, sizeof(pCmd->defaultVal)); +} + +void tscClearSqlMetaInfo(SSqlCmd* pCmd) { + /* remove the metermeta/metricmeta in cache */ + taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMeterMeta), false); + taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMetricMeta), false); +} + +void tscClearSqlMetaInfoForce(SSqlCmd* pCmd) { + /* remove the metermeta/metricmeta in cache */ + taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMeterMeta), true); + taosRemoveDataFromCache(tscCacheHandle, (void**)&(pCmd->pMetricMeta), true); +} + +int32_t tscCreateResPointerInfo(SSqlCmd* pCmd, SSqlRes* pRes) { + if (pRes->tsrow == NULL) { + pRes->numOfnchar = 0; + int32_t numOfOutputCols = pCmd->fieldsInfo.numOfOutputCols; + + for (int32_t i = 0; i < numOfOutputCols; ++i) { + TAOS_FIELD* pField = tscFieldInfoGetField(pCmd, i); + if (pField->type == TSDB_DATA_TYPE_NCHAR) { + pRes->numOfnchar++; + } + } + + pRes->tsrow = calloc(1, (POINTER_BYTES + sizeof(short)) * numOfOutputCols + POINTER_BYTES * pRes->numOfnchar); + if (pRes->tsrow == NULL) { + pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; + return pRes->code; + } + + pRes->bytes = (short*)((char*)pRes->tsrow + POINTER_BYTES * numOfOutputCols); + if (pRes->numOfnchar > 0) { + pRes->buffer = (char**)((char*)pRes->bytes + sizeof(short) * numOfOutputCols); + } + } + + return TSDB_CODE_SUCCESS; +} + +void tscDestroyResPointerInfo(SSqlRes* pRes) { + // free all buffers containing the multibyte string + for (int i = 0; i < pRes->numOfnchar; i++) { + if (pRes->buffer[i] != NULL) { + tfree(pRes->buffer[i]); + } + } + + tfree(pRes->tsrow); + + pRes->numOfnchar = 0; + pRes->buffer = NULL; + pRes->bytes = NULL; +} + +void tscfreeSqlCmdData(SSqlCmd* pCmd) { + tscDestroyBlockArrayList(&pCmd->pDataBlocks); + + tscTagCondRelease(&pCmd->tagCond); + tscClearFieldInfo(pCmd); + + tfree(pCmd->exprsInfo.pExprs); + memset(&pCmd->exprsInfo, 0, sizeof(pCmd->exprsInfo)); + + tfree(pCmd->colList.pColList); + memset(&pCmd->colList, 0, sizeof(pCmd->colList)); +} + +void tscFreeSqlObjPartial(SSqlObj* pSql) { + if (pSql == NULL || pSql->signature != pSql) return; + + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + STscObj* pObj = pSql->pTscObj; + + int32_t cmd = pCmd->command; + if (cmd < TSDB_SQL_INSERT 
|| cmd == TSDB_SQL_RETRIEVE_METRIC || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { + tscRemoveFromSqlList(pSql); + } + + // pSql->sqlstr will be used by tscBuildQueryStreamDesc + pthread_mutex_lock(&pObj->mutex); + tfree(pSql->sqlstr); + pthread_mutex_unlock(&pObj->mutex); + + tfree(pSql->res.pRsp); + pSql->res.row = 0; + pSql->res.numOfRows = 0; + pSql->res.numOfTotal = 0; + + pSql->res.numOfGroups = 0; + tfree(pSql->res.pGroupRec); + + tscDestroyLocalReducer(pSql); + + tfree(pSql->pSubs); + pSql->numOfSubs = 0; + tscDestroyResPointerInfo(pRes); + + tscfreeSqlCmdData(&pSql->cmd); + tscClearSqlMetaInfo(pCmd); +} + +void tscFreeSqlObj(SSqlObj* pSql) { + if (pSql == NULL || pSql->signature != pSql) return; + + tscTrace("%p start to free sql object", pSql); + tscFreeSqlObjPartial(pSql); + + pSql->signature = NULL; + pSql->fp = NULL; + + SSqlCmd* pCmd = &pSql->cmd; + + memset(pCmd->payload, 0, (size_t)tsRpcHeadSize); + tfree(pCmd->payload); + + pCmd->allocSize = 0; + + if (pSql->res.buffer != NULL) { + for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; i++) { + if (pSql->res.buffer[i] != NULL) { + tfree(pSql->res.buffer[i]); + } + } + + tfree(pSql->res.buffer); + } + + if (pSql->fp == NULL) { + sem_destroy(&pSql->rspSem); + sem_destroy(&pSql->emptyRspSem); + } + + free(pSql); +} + +SInsertedDataBlocks* tscCreateDataBlock(int32_t size) { + SInsertedDataBlocks* dataBuf = (SInsertedDataBlocks*)calloc(1, sizeof(SInsertedDataBlocks)); + dataBuf->nAllocSize = (uint32_t) size; + dataBuf->pData = calloc(1, dataBuf->nAllocSize); + return dataBuf; +} + +void tscDestroyDataBlock(SInsertedDataBlocks** pDataBlock) { + if (*pDataBlock == NULL) { + return; + } + + tfree((*pDataBlock)->pData); + tfree(*pDataBlock); +} + +SDataBlockList* tscCreateBlockArrayList() { + const int32_t DEFAULT_INITIAL_NUM_OF_BLOCK = 16; + + SDataBlockList* pDataBlockArrayList = calloc(1, sizeof(SDataBlockList)); + pDataBlockArrayList->nAlloc = DEFAULT_INITIAL_NUM_OF_BLOCK; + pDataBlockArrayList->pData = calloc(1, POINTER_BYTES * pDataBlockArrayList->nAlloc); + + return pDataBlockArrayList; +} + +void tscDestroyBlockArrayList(SDataBlockList** pList) { + if (*pList == NULL) { + return; + } + + for (int32_t i = 0; i < (*pList)->nSize; i++) { + tscDestroyDataBlock(&(*pList)->pData[i]); + } + + tfree((*pList)->pData); + tfree(*pList); +} + +int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, SInsertedDataBlocks* pDataBlock) { + SSqlCmd* pCmd = &pSql->cmd; + + pCmd->count = pDataBlock->numOfMeters; + strcpy(pCmd->name, pDataBlock->meterId); + + tscAllocPayloadWithSize(pCmd, pDataBlock->nAllocSize); + memcpy(pCmd->payload, pDataBlock->pData, pDataBlock->nAllocSize); + + /* set the message length */ + pCmd->payloadLen = pDataBlock->nAllocSize; + return tscGetMeterMeta(pSql, pCmd->name); +} + +void tscFreeUnusedDataBlocks(SDataBlockList* pList) { + /* release additional memory consumption */ + for (int32_t i = 0; i < pList->nSize; ++i) { + SInsertedDataBlocks* pDataBlock = pList->pData[i]; + pDataBlock->pData = realloc(pDataBlock->pData, (size_t) pDataBlock->size); + pDataBlock->nAllocSize = (uint32_t) pDataBlock->size; + } +} + +void tscCloseTscObj(STscObj* pObj) { + pObj->signature = NULL; + SSqlObj* pSql = pObj->pSql; + globalCode = pSql->res.code; + + taosTmrStopA(&(pObj->pTimer)); + tscFreeSqlObj(pSql); + + pthread_mutex_destroy(&pObj->mutex); + tscTrace("%p DB connection is closed", pObj); + tfree(pObj); +} + +bool tscIsInsertOrImportData(char* sqlstr) { + SSQLToken t0 = {0}; + while (1) { + t0.n = tSQLGetToken(sqlstr, &t0.type); + if 
(t0.type != TK_SPACE) { + break; + } + + sqlstr += t0.n; + } + + return t0.type == TK_INSERT || t0.type == TK_IMPORT; +} + +int tscAllocPayloadWithSize(SSqlCmd* pCmd, int size) { + assert(size > 0); + + if (pCmd->payload == NULL) { + assert(pCmd->allocSize == 0); + + pCmd->payload = (char*)calloc(1, size); + if (pCmd->payload == NULL) return TSDB_CODE_CLI_OUT_OF_MEMORY; + + pCmd->allocSize = size; + } else { + if (pCmd->allocSize < size) { + pCmd->payload = realloc(pCmd->payload, size); + if (pCmd->payload == NULL) return TSDB_CODE_CLI_OUT_OF_MEMORY; + pCmd->allocSize = size; + } + } + + memset(pCmd->payload, 0, pCmd->allocSize); + assert(pCmd->allocSize >= size); + + return TSDB_CODE_SUCCESS; +} + +static void ensureSpace(SFieldInfo* pFieldInfo, int32_t size) { + if (size > pFieldInfo->numOfAlloc) { + int32_t oldSize = pFieldInfo->numOfAlloc; + + int32_t newSize = (oldSize <= 0) ? 8 : (oldSize << 1); + while (newSize < size) { + newSize = (newSize << 1); + } + + if (newSize > TSDB_MAX_COLUMNS) { + newSize = TSDB_MAX_COLUMNS; + } + + int32_t inc = newSize - oldSize; + + pFieldInfo->pFields = realloc(pFieldInfo->pFields, newSize * sizeof(TAOS_FIELD)); + memset(&pFieldInfo->pFields[oldSize], 0, inc * sizeof(TAOS_FIELD)); + + pFieldInfo->pOffset = realloc(pFieldInfo->pOffset, newSize * sizeof(int16_t)); + memset(&pFieldInfo->pOffset[oldSize], 0, inc * sizeof(int16_t)); + + pFieldInfo->numOfAlloc = newSize; + } +} + +static void evic(SFieldInfo* pFieldInfo, int32_t index) { + if (index < pFieldInfo->numOfOutputCols) { + memmove(&pFieldInfo->pFields[index + 1], &pFieldInfo->pFields[index], + sizeof(pFieldInfo->pFields[0]) * (pFieldInfo->numOfOutputCols - index)); + } +} + +static void setValueImpl(TAOS_FIELD* pField, int8_t type, char* name, int16_t bytes) { + pField->type = type; + strncpy(pField->name, name, TSDB_COL_NAME_LEN); + pField->bytes = bytes; +} + +void tscFieldInfoSetValFromSchema(SFieldInfo* pFieldInfo, int32_t index, SSchema* pSchema) { + ensureSpace(pFieldInfo, pFieldInfo->numOfOutputCols + 1); + evic(pFieldInfo, index); + + TAOS_FIELD* pField = &pFieldInfo->pFields[index]; + setValueImpl(pField, pSchema->type, pSchema->name, pSchema->bytes); + pFieldInfo->numOfOutputCols++; +} + +void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* pField) { + ensureSpace(pFieldInfo, pFieldInfo->numOfOutputCols + 1); + evic(pFieldInfo, index); + + memcpy(&pFieldInfo->pFields[index], pField, sizeof(TAOS_FIELD)); + pFieldInfo->numOfOutputCols++; +} + +void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, char* name, int16_t bytes) { + ensureSpace(pFieldInfo, pFieldInfo->numOfOutputCols + 1); + evic(pFieldInfo, index); + + TAOS_FIELD* pField = &pFieldInfo->pFields[index]; + setValueImpl(pField, type, name, bytes); + pFieldInfo->numOfOutputCols++; +} + +void tscFieldInfoCalOffset(SSqlCmd* pCmd) { + SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; + pFieldInfo->pOffset[0] = 0; + + for (int32_t i = 1; i < pFieldInfo->numOfOutputCols; ++i) { + pFieldInfo->pOffset[i] = pFieldInfo->pOffset[i - 1] + pFieldInfo->pFields[i - 1].bytes; + } +} + +void tscFieldInfoRenewOffsetForInterResult(SSqlCmd* pCmd) { + SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; + if (pFieldInfo->numOfOutputCols == 0) { + return; + } + + pFieldInfo->pOffset[0] = 0; + + /* + * the retTypeLen is used to store the intermediate result length + * for potential secondary merge exists + */ + for (int32_t i = 1; i < pFieldInfo->numOfOutputCols; ++i) { + pFieldInfo->pOffset[i] = 
pFieldInfo->pOffset[i - 1] + tscSqlExprGet(pCmd, i - 1)->resBytes; + } +} + +void tscFieldInfoClone(SFieldInfo* src, SFieldInfo* dst) { + if (src == NULL) { + return; + } + + *dst = *src; + + dst->pFields = malloc(sizeof(TAOS_FIELD) * dst->numOfAlloc); + dst->pOffset = malloc(sizeof(short) * dst->numOfAlloc); + + memcpy(dst->pFields, src->pFields, sizeof(TAOS_FIELD) * dst->numOfOutputCols); + memcpy(dst->pOffset, src->pOffset, sizeof(short) * dst->numOfOutputCols); +} + +TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index) { + if (index >= pCmd->fieldsInfo.numOfOutputCols) { + return NULL; + } + + return &pCmd->fieldsInfo.pFields[index]; +} + +int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index) { + if (index >= pCmd->fieldsInfo.numOfOutputCols) { + return 0; + } + + return pCmd->fieldsInfo.pOffset[index]; +} + +int32_t tscGetResRowLength(SSqlCmd* pCmd) { + SFieldInfo* pFieldInfo = &pCmd->fieldsInfo; + if (pFieldInfo->numOfOutputCols <= 0) { + return 0; + } + + return pFieldInfo->pOffset[pFieldInfo->numOfOutputCols - 1] + + pFieldInfo->pFields[pFieldInfo->numOfOutputCols - 1].bytes; +} + +void tscClearFieldInfo(SSqlCmd* pCmd) { + if (pCmd == NULL) { + return; + } + + tfree(pCmd->fieldsInfo.pOffset); + tfree(pCmd->fieldsInfo.pFields); + memset(&pCmd->fieldsInfo, 0, sizeof(pCmd->fieldsInfo)); +} + +static void _exprCheckSpace(SSqlExprInfo* pExprInfo, int32_t size) { + if (size > pExprInfo->numOfAlloc) { + int32_t oldSize = pExprInfo->numOfAlloc; + + int32_t newSize = (oldSize <= 0) ? 8 : (oldSize << 1); + while (newSize < size) { + newSize = (newSize << 1); + } + + if (newSize > TSDB_MAX_COLUMNS) { + newSize = TSDB_MAX_COLUMNS; + } + + int32_t inc = newSize - oldSize; + + pExprInfo->pExprs = realloc(pExprInfo->pExprs, newSize * sizeof(SSqlExpr)); + memset(&pExprInfo->pExprs[oldSize], 0, inc * sizeof(SSqlExpr)); + + pExprInfo->numOfAlloc = newSize; + } +} + +static void _exprEvic(SSqlExprInfo* pExprInfo, int32_t index) { + if (index < pExprInfo->numOfExprs) { + memmove(&pExprInfo->pExprs[index + 1], &pExprInfo->pExprs[index], + sizeof(pExprInfo->pExprs[0]) * (pExprInfo->numOfExprs - index)); + } +} + +SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, + int16_t size) { + SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + + _exprCheckSpace(pExprInfo, pExprInfo->numOfExprs + 1); + _exprEvic(pExprInfo, index); + + SSqlExpr* pExpr = &pExprInfo->pExprs[index]; + + pExpr->sqlFuncId = functionId; + + pExpr->colInfo.colIdx = srcColumnIndex; + if (srcColumnIndex == -1) { + pExpr->colInfo.colId = -1; + } else { + pExpr->colInfo.colId = pSchema[srcColumnIndex].colId; + } + + pExpr->colInfo.isTag = false; + pExpr->resType = type; + pExpr->resBytes = size; + + pExprInfo->numOfExprs++; + return pExpr; +} + +SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, + int16_t size) { + SSqlExprInfo* pExprInfo = &pCmd->exprsInfo; + if (index > pExprInfo->numOfExprs) { + return NULL; + } + + SSqlExpr* pExpr = &pExprInfo->pExprs[index]; + + pExpr->sqlFuncId = functionId; + + pExpr->colInfo.colIdx = srcColumnIndex; + pExpr->colInfo.colId = tsGetSchemaColIdx(pCmd->pMeterMeta, srcColumnIndex)->colId; + + pExpr->resType = type; + pExpr->resBytes = size; + + return pExpr; +} + +void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) { + if (pExpr == NULL || argument == NULL || bytes == 0) { + return; + } + + 
// set parameter value + // transfer to tVariant from byte data/no ascii data + tVariantCreateB(&pExpr->param[pExpr->numOfParams], argument, bytes, type); + + pExpr->numOfParams += 1; + assert(pExpr->numOfParams <= 3); +} + +SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index) { + if (pCmd->exprsInfo.numOfExprs <= index) { + return NULL; + } + + return &pCmd->exprsInfo.pExprs[index]; +} + +void tscSqlExprClone(SSqlExprInfo* src, SSqlExprInfo* dst) { + if (src == NULL) { + return; + } + + *dst = *src; + + dst->pExprs = malloc(sizeof(SSqlExpr) * dst->numOfAlloc); + memcpy(dst->pExprs, src->pExprs, sizeof(SSqlExpr) * dst->numOfExprs); + + for (int32_t i = 0; i < dst->numOfExprs; ++i) { + for (int32_t j = 0; j < src->pExprs[i].numOfParams; ++j) { + tVariantAssign(&dst->pExprs[i].param[j], &src->pExprs[i].param[j]); + } + } +} + +static void _cf_ensureSpace(SColumnsInfo* pcolList, int32_t size) { + if (pcolList->numOfAlloc < size) { + int32_t oldSize = pcolList->numOfAlloc; + + int32_t newSize = (oldSize <= 0) ? 8 : (oldSize << 1); + while (newSize < size) { + newSize = (newSize << 1); + } + + if (newSize > TSDB_MAX_COLUMNS) { + newSize = TSDB_MAX_COLUMNS; + } + + int32_t inc = newSize - oldSize; + + pcolList->pColList = realloc(pcolList->pColList, newSize * sizeof(SColumnBase)); + memset(&pcolList->pColList[oldSize], 0, inc * sizeof(SColumnBase)); + + pcolList->numOfAlloc = newSize; + } +} + +static void _cf_evic(SColumnsInfo* pcolList, int32_t index) { + if (index < pcolList->numOfCols) { + memmove(&pcolList->pColList[index + 1], &pcolList->pColList[index], + sizeof(SColumnBase) * (pcolList->numOfCols - index)); + + memset(&pcolList->pColList[index], 0, sizeof(SColumnBase)); + } +} + +SColumnBase* tscColumnInfoGet(SSqlCmd* pCmd, int32_t index) { + if (pCmd->colList.numOfCols < index) { + return NULL; + } + + return &pCmd->colList.pColList[index]; +} + +SColumnBase* tscColumnInfoInsert(SSqlCmd* pCmd, int32_t colIndex) { + SColumnsInfo* pcolList = &pCmd->colList; + + if (colIndex < 0) { + /* ignore the tbname column to be inserted into source list */ + return NULL; + } + + int32_t i = 0; + while (i < pcolList->numOfCols && pcolList->pColList[i].colIndex < colIndex) { + i++; + } + + if ((i < pcolList->numOfCols && pcolList->pColList[i].colIndex > colIndex) || (i >= pcolList->numOfCols)) { + _cf_ensureSpace(pcolList, pcolList->numOfCols + 1); + _cf_evic(pcolList, i); + + pcolList->pColList[i].colIndex = (int16_t)colIndex; + pcolList->numOfCols++; + pCmd->numOfCols++; + } + + return &pcolList->pColList[i]; +} + +void tscColumnInfoClone(SColumnsInfo* src, SColumnsInfo* dst) { + if (src == NULL) { + return; + } + + *dst = *src; + + dst->pColList = malloc(sizeof(SColumnBase) * dst->numOfAlloc); + memcpy(dst->pColList, src->pColList, sizeof(SColumnBase) * dst->numOfCols); +} + +void tscColumnInfoReserve(SSqlCmd* pCmd, int32_t size) { _cf_ensureSpace(&pCmd->colList, size); } + +/* + * 1. normal name, not a keyword or number + * 2. name with quote + * 3. string with only one delimiter '.'. 
+ * + * only_one_part + * 'only_one_part' + * first_part.second_part + * first_part.'second_part' + * 'first_part'.second_part + * 'first_part'.'second_part' + * 'first_part.second_part' + * + */ + +static int32_t validateQuoteToken(SSQLToken* pToken) { + strdequote(pToken->z); + strtrim(pToken->z); + pToken->n = (uint32_t)strlen(pToken->z); + + int32_t k = tSQLGetToken(pToken->z, &pToken->type); + if (k != pToken->n || pToken->type != TK_ID) { + return TSDB_CODE_INVALID_SQL; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t tscValidateName(SSQLToken* pToken) { + if (pToken->type != TK_STRING && pToken->type != TK_ID) { + return TSDB_CODE_INVALID_SQL; + } + + char* sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n); + if (sep == NULL) { // single part + if (pToken->type == TK_STRING) { + return validateQuoteToken(pToken); + } else { + if (isNumber(pToken)) { + return TSDB_CODE_INVALID_SQL; + } + } + } else { // two part + int32_t oldLen = pToken->n; + char* pStr = pToken->z; + + pToken->n = tSQLGetToken(pToken->z, &pToken->type); + if (pToken->z[pToken->n] != TS_PATH_DELIMITER[0]) { + return TSDB_CODE_INVALID_SQL; + } + + if (pToken->type != TK_STRING && pToken->type != TK_ID) { + return TSDB_CODE_INVALID_SQL; + } + + if (pToken->type == TK_STRING && validateQuoteToken(pToken) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + int32_t firstPartLen = pToken->n; + + pToken->z = sep + 1; + pToken->n = oldLen - (sep - pStr) - 1; + int32_t len = tSQLGetToken(pToken->z, &pToken->type); + if (len != pToken->n || (pToken->type != TK_STRING && pToken->type != TK_ID)) { + return TSDB_CODE_INVALID_SQL; + } + + if (pToken->type == TK_STRING && validateQuoteToken(pToken) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_INVALID_SQL; + } + + // re-build the whole name string + if (pStr[firstPartLen] == TS_PATH_DELIMITER[0]) { + // first part do not have quote + // do nothing + } else { + pStr[firstPartLen] = TS_PATH_DELIMITER[0]; + memmove(&pStr[firstPartLen + 1], pToken->z, pToken->n); + pStr[firstPartLen + sizeof(TS_PATH_DELIMITER[0]) + pToken->n] = 0; + } + pToken->n += (firstPartLen + sizeof(TS_PATH_DELIMITER[0])); + pToken->z = pStr; + } + + return TSDB_CODE_SUCCESS; +} + +void tscIncStreamExecutionCount(void* pStream) { + if (pStream == NULL) { + return; + } + + SSqlStream* ps = (SSqlStream*)pStream; + ps->num += 1; +} + +bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId) { + if (pCmd->pMeterMeta == NULL) { + return false; + } + + if (colId == -1 && UTIL_METER_IS_METRIC(pCmd)) { + return true; + } + + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + int32_t numOfTotal = pCmd->pMeterMeta->numOfTags + pCmd->pMeterMeta->numOfColumns; + + for (int32_t i = 0; i < numOfTotal; ++i) { + if (pSchema[i].colId == colId) { + return true; + } + } + + return false; +} + +void tscTagCondAssign(STagCond* pDst, STagCond* pSrc) { + if (pSrc->len == 0) { + memset(pDst, 0, sizeof(STagCond)); + return; + } + + pDst->pData = strdup(pSrc->pData); + pDst->allocSize = pSrc->len + 1; + pDst->type = pSrc->type; + pDst->len = pSrc->len; +} + +void tscTagCondRelease(STagCond* pCond) { + if (pCond->allocSize > 0) { + assert(pCond->pData != NULL); + tfree(pCond->pData); + } + + memset(pCond, 0, sizeof(STagCond)); +} + +void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd) { + SSchema* pSchema = tsGetSchema(pCmd->pMeterMeta); + + for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + pColInfo[i].functionId = pExpr->sqlFuncId; + + if 
(pExpr->colInfo.isTag) { + SSchema* pTagSchema = tsGetTagSchema(pCmd->pMeterMeta); + int16_t actualTagIndex = pCmd->tagColumnIndex[pExpr->colInfo.colIdx]; + + pColInfo[i].type = (actualTagIndex != -1) ? pTagSchema[actualTagIndex].type : TSDB_DATA_TYPE_BINARY; + } else { + pColInfo[i].type = pSchema[pExpr->colInfo.colIdx].type; + } + } +} + +void tscSetFreeHeatBeat(STscObj* pObj) { + if (pObj == NULL || pObj->signature != pObj || pObj->pHb == NULL) { + return; + } + + SSqlObj* pHeatBeat = pObj->pHb; + assert(pHeatBeat == pHeatBeat->signature); + + pHeatBeat->cmd.type = 1; // to denote the heart-beat timer close connection + // and free all allocated resources +} + +bool tscShouldFreeHeatBeat(SSqlObj* pHb) { + assert(pHb == pHb->signature); + + return pHb->cmd.type == 1; +} + +void tscCleanSqlCmd(SSqlCmd* pCmd) { + tscfreeSqlCmdData(pCmd); + + uint32_t allocSize = pCmd->allocSize; + char* allocPtr = pCmd->payload; + SMeterMeta* pMeterMeta = pCmd->pMeterMeta; + SMetricMeta* pMetricMeta = pCmd->pMetricMeta; + + memset(pCmd, 0, sizeof(SSqlCmd)); + + // restore values + pCmd->allocSize = allocSize; + pCmd->payload = allocPtr; + pCmd->pMeterMeta = pMeterMeta; + pCmd->pMetricMeta = pMetricMeta; +} + +/* + * the following three kinds of SqlObj should not be freed + * 1. SqlObj for stream computing + * 2. main SqlObj + * 3. heartbeat SqlObj + * + * If res code is error and SqlObj does not belong to above types, it should be + * automatically freed for async query, ignoring that connection should be kept. + * + * If connection need to be recycled, the SqlObj also should be freed. + */ +bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { + if (pSql == NULL || pSql->signature != pSql || pSql->fp == NULL) { + return false; + } + + STscObj* pTscObj = pSql->pTscObj; + if (pSql->pStream != NULL || pTscObj->pHb == pSql) { + return false; + } + + int32_t command = pSql->cmd.command; + if (pTscObj->pSql == pSql) { + /* + * in case of taos_connect_a query, the object should all be released, even it is the + * master sql object. Otherwise, the master sql should not be released + */ + if (command == TSDB_SQL_CONNECT && pSql->res.code != TSDB_CODE_SUCCESS) { + return true; + } + + return false; + } + + if (command == TSDB_SQL_INSERT) { + SSqlCmd* pCmd = &pSql->cmd; + + /* + * in case of multi-vnode insertion, the object should not be released until all + * data blocks have been submit to vnode. 
+ */ + SDataBlockList* pDataBlocks = pCmd->pDataBlocks; + if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + tscTrace("%p object should be release since all data blocks have been submit", pSql); + return true; + } else { + return false; + } + } else { + return tscKeepConn[command] == 0 || + (pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS && pSql->res.code != TSDB_CODE_SUCCESS); + } +} + +void tscDoQuery(SSqlObj* pSql) { + SSqlCmd* pCmd = &pSql->cmd; + + if (pCmd->command > TSDB_SQL_LOCAL) { + tscProcessLocalCmd(pSql); + } else { + // add to sql list, so that the show queries could get the query info + if (pCmd->command == TSDB_SQL_SELECT) { + tscAddIntoSqlList(pSql); + } + + if (tscIsFirstProjQueryOnMetric(pSql)) { + pSql->cmd.vnodeIdx += 1; + } + + if (pSql->fp == NULL) { + if (0 == pCmd->isInsertFromFile) { + tscProcessSql(pSql); + tscProcessMultiVnodesInsert(pSql); // handle the multi-vnode insertion + } else if (1 == pCmd->isInsertFromFile) { + tscProcessMultiVnodesInsertForFile(pSql); + } else { + assert(false); + } + } else { + tscProcessSql(pSql); + } + + } +} diff --git a/src/connector/go/src/taosSql/connection.go b/src/connector/go/src/taosSql/connection.go new file mode 100755 index 000000000000..2a3f5a9acd8c --- /dev/null +++ b/src/connector/go/src/taosSql/connection.go @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package taosSql + +import "C" +import ( + "context" + "errors" + "database/sql/driver" + "unsafe" + "strconv" + "strings" + "time" +) + +type taosConn struct { + taos unsafe.Pointer + affectedRows int + insertId int + cfg *config + status statusFlag + parseTime bool + reset bool // set when the Go SQL package calls ResetSession +} + +type taosSqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *taosSqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *taosSqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} + +func (mc *taosConn) Begin() (driver.Tx, error) { + taosLog.Println("taosSql not support transaction") + return nil, errors.New("taosSql not support transaction") +} + +func (mc *taosConn) Close() (err error) { + if mc.taos == nil { + return errConnNoExist + } + mc.taos_close() + return nil +} + +func (mc *taosConn) Prepare(query string) (driver.Stmt, error) { + if mc.taos == nil { + return nil, errInvalidConn + } + + stmt := &taosSqlStmt{ + mc: mc, + pSql: query, + } + + // find ? count and save to stmt.paramCount + stmt.paramCount = strings.Count(query, "?") + + //fmt.Printf("prepare alloc stmt:%p, sql:%s\n", stmt, query) + taosLog.Printf("prepare alloc stmt:%p, sql:%s\n", stmt, query) + + return stmt, nil +} + +func (mc *taosConn) interpolateParams(query string, args []driver.Value) (string, error) { + // Number of ? 
should be same to len(args) + if strings.Count(query, "?") != len(args) { + return "", driver.ErrSkip + } + + buf := make([]byte, defaultBufSize) + buf = buf[:0] // clear buf + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case uint64: + // Handle uint64 explicitly because our custom ConvertValue emits unsigned values + buf = strconv.AppendUint(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + v := v.In(mc.cfg.loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) + } else { + buf = append(buf, "_binary'"...) 
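+ // Escape the raw bytes before appending the closing quote; the escaping style
+ // depends on whether backslash escapes are disabled for this connection.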
+ if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + //buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + //buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + //if len(buf)+4 > mc.maxAllowedPacket { + if len(buf)+4 > maxTaosSqlLen { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *taosConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.taos == nil { + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + + mc.affectedRows = 0 + mc.insertId = 0 + _, err := mc.taosQuery(query) + if err == nil { + return &taosSqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + + return nil, err +} + +func (mc *taosConn) Query(query string, args []driver.Value) (driver.Rows, error) { + return mc.query(query, args) +} + +func (mc *taosConn) query(query string, args []driver.Value) (*textRows, error) { + if mc.taos == nil { + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + + num_fields, err := mc.taosQuery(query) + if err == nil { + // Read Result + rows := new(textRows) + rows.mc = mc + + // Columns field + rows.rs.columns, err = mc.readColumns(num_fields) + return rows, err + } + return nil, err +} + +// Ping implements driver.Pinger interface +func (mc *taosConn) Ping(ctx context.Context) (err error) { + if mc.taos != nil { + return nil + } + return errInvalidConn +} + +// BeginTx implements driver.ConnBeginTx interface +func (mc *taosConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + taosLog.Println("taosSql not support transaction") + return nil, errors.New("taosSql not support transaction") +} + +func (mc *taosConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + if mc.taos == nil { + return nil, errInvalidConn + } + + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + rows, err := mc.query(query, dargs) + if err != nil { + return nil, err + } + + return rows, err +} + +func (mc *taosConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + if mc.taos == nil { + return nil, errInvalidConn + } + + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + return mc.Exec(query, dargs) +} + +func (mc *taosConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if mc.taos == nil { + return nil, errInvalidConn + } + + stmt, err := mc.Prepare(query) + if err != nil { + return nil, err + } + + return stmt, nil +} + +func (stmt *taosSqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + if stmt.mc == nil { + return nil, errInvalidConn + } + 
dargs, err := namedValueToValue(args) + + if err != nil { + return nil, err + } + + rows, err := stmt.query(dargs) + if err != nil { + return nil, err + } + return rows, err +} + +func (stmt *taosSqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + if stmt.mc == nil { + return nil, errInvalidConn + } + + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + return stmt.Exec(dargs) +} + +func (mc *taosConn) CheckNamedValue(nv *driver.NamedValue) (err error) { + nv.Value, err = converter{}.ConvertValue(nv.Value) + return +} + +// ResetSession implements driver.SessionResetter. +// (From Go 1.10) +func (mc *taosConn) ResetSession(ctx context.Context) error { + if mc.taos == nil { + return driver.ErrBadConn + } + mc.reset = true + return nil +} diff --git a/src/connector/go/src/taosSql/connector.go b/src/connector/go/src/taosSql/connector.go new file mode 100755 index 000000000000..55715b949ede --- /dev/null +++ b/src/connector/go/src/taosSql/connector.go @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +package taosSql + +import ( + "context" + "database/sql/driver" +) + +type connector struct { + cfg *config +} + +// Connect implements driver.Connector interface. +// Connect returns a connection to the database. +func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { + var err error + // New taosConn + mc := &taosConn{ + cfg: c.cfg, + parseTime: c.cfg.parseTime, + } + + // Connect to Server + mc.taos, err = mc.taosConnect(mc.cfg.addr, mc.cfg.user, mc.cfg.passwd, mc.cfg.dbName, mc.cfg.port) + if err != nil { + return nil, err + } + + return mc, nil +} + +// Driver implements driver.Connector interface. +// Driver returns &taosSQLDriver{}. +func (c *connector) Driver() driver.Driver { + return &taosSQLDriver{} +} diff --git a/src/connector/go/src/taosSql/const.go b/src/connector/go/src/taosSql/const.go new file mode 100755 index 000000000000..89214a0d45c5 --- /dev/null +++ b/src/connector/go/src/taosSql/const.go @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package taosSql + +const ( + timeFormat = "2006-01-02 15:04:05" + maxTaosSqlLen = 65380 + defaultBufSize = maxTaosSqlLen + 32 +) + +type fieldType byte + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota +) + +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) + diff --git a/src/connector/go/src/taosSql/driver.go b/src/connector/go/src/taosSql/driver.go new file mode 100755 index 000000000000..6839979c61c1 --- /dev/null +++ b/src/connector/go/src/taosSql/driver.go @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +package taosSql + +import ( + "context" + "database/sql" + "database/sql/driver" +) + +// taosSqlDriver is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. +type taosSQLDriver struct{} + +// Open new Connection. +// the DSN string is formatted +func (d taosSQLDriver) Open(dsn string) (driver.Conn, error) { + cfg, err := parseDSN(dsn) + if err != nil { + return nil, err + } + c := &connector{ + cfg: cfg, + } + return c.Connect(context.Background()) +} + +func init() { + sql.Register("taosSql", &taosSQLDriver{}) + taosLogInit() +} diff --git a/src/connector/go/src/taosSql/dsn.go b/src/connector/go/src/taosSql/dsn.go new file mode 100755 index 000000000000..be31462f1ddf --- /dev/null +++ b/src/connector/go/src/taosSql/dsn.go @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package taosSql + +import ( + "errors" + "net/url" + "strings" + "time" +) + +var ( + errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") +) + +// Config is a configuration parsed from a DSN string. +// If a new Config is created instead of being parsed from a DSN string, +// the NewConfig function should be used, which sets default values. 
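+// A config is normally produced by parseDSN from a string such as
+// "user:password@tcp(host:port)/dbname?parseTime=true" (illustrative only; the
+// exact grammar accepted is defined by parseDSN below).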
+type config struct { + user string // Username + passwd string // Password (requires User) + net string // Network type + addr string // Network address (requires Net) + port int + dbName string // Database name + params map[string]string // Connection parameters + loc *time.Location // Location for time.Time values + columnsWithAlias bool // Prepend table alias to column names + interpolateParams bool // Interpolate placeholders into query string + parseTime bool // Parse time values to time.Time +} + +// NewConfig creates a new Config and sets default values. +func newConfig() *config { + return &config{ + loc: time.UTC, + interpolateParams: true, + parseTime: true, + } +} + +// ParseDSN parses the DSN string to a Config +func parseDSN(dsn string) (cfg *config, err error) { + taosLog.Println("input dsn:", dsn) + + // New config with some default values + cfg = newConfig() + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.passwd = dsn[k+1 : j] + break + } + } + cfg.user = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.addr = dsn[k+1 : i-1] + break + } + } + cfg.net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' 
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.dbName = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + taosLog.Printf("cfg info: %+v", cfg) + + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + case "columnsWithAlias": + var isBool bool + cfg.columnsWithAlias, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.interpolateParams, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // time.Time parsing + case "parseTime": + var isBool bool + cfg.parseTime, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + default: + // lazy init + if cfg.params == nil { + cfg.params = make(map[string]string) + } + + if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + diff --git a/src/connector/go/src/taosSql/result.go b/src/connector/go/src/taosSql/result.go new file mode 100755 index 000000000000..a57ca7bae35a --- /dev/null +++ b/src/connector/go/src/taosSql/result.go @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +package taosSql + +/* +#cgo CFLAGS : -I/usr/local/include/taos/ +#cgo LDFLAGS: -L/usr/local/lib/taos -ltaos +#include +#include +#include +#include +*/ +import "C" + +import ( + "database/sql/driver" + "errors" + "strconv" + "unsafe" + "fmt" + "io" + "time" +) + +/****************************************************************************** +* Result * +******************************************************************************/ +// Read Packets as Field Packets until EOF-Packet or an Error appears +func (mc *taosConn) readColumns(count int) ([]taosSqlField, error) { + + columns := make([]taosSqlField, count) + var result unsafe.Pointer + result = C.taos_use_result(mc.taos) + if result == nil { + return nil , errors.New("invalid result") + } + + pFields := (*C.struct_taosField)(C.taos_fetch_fields(result)) + + // TODO: Optimized rewriting !!!! 
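+ // The C array of field descriptors is reinterpreted as a very large fixed-size
+ // Go array so individual entries can be indexed; only the first count entries
+ // returned by taos_fetch_fields are expected to be valid.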
+ fields := (*[1 << 30]C.struct_taosField)(unsafe.Pointer(pFields)) + + for i := 0; i int of C + break + + case C.TSDB_DATA_TYPE_BIGINT: + dest[i] = (int64)(*((*int64)(currentRow))) + break + + case C.TSDB_DATA_TYPE_FLOAT: + dest[i] = (*((*float32)(currentRow))) + break + + case C.TSDB_DATA_TYPE_DOUBLE: + dest[i] = (*((*float64)(currentRow))) + break + + case C.TSDB_DATA_TYPE_BINARY, C.TSDB_DATA_TYPE_NCHAR: + charLen := rows.rs.columns[i].length + var index uint32 + binaryVal := make([]byte, charLen) + for index=0; index < charLen; index++ { + binaryVal[index] = *((*byte)(unsafe.Pointer(uintptr(currentRow) + uintptr(index)))) + } + dest[i] = string(binaryVal[:]) + break + + case C.TSDB_DATA_TYPE_TIMESTAMP: + if mc.cfg.parseTime == true { + timestamp := (int64)(*((*int64)(currentRow))) + dest[i] = timestampConvertToString(timestamp, int(C.taos_result_precision(result))) + }else { + dest[i] = (int64)(*((*int64)(currentRow))) + } + break + + default: + fmt.Println("default fieldType: set dest[] to nil") + dest[i] = nil + break + } + } + + return nil +} + +// Read result as Field format until all rows or an Error appears +// call this func in conn mode +func (rows *textRows) readRow(dest []driver.Value) error { + return rows.taosSqlRows.readRow(dest) +} + +// call thsi func in stmt mode +func (rows *binaryRows) readRow(dest []driver.Value) error { + return rows.taosSqlRows.readRow(dest) +} + +func timestampConvertToString(timestamp int64, precision int) string { + var decimal, sVal, nsVal int64 + if precision == 0 { + decimal = timestamp % 1000 + sVal = timestamp / 1000 + nsVal = decimal * 1000 + } else { + decimal = timestamp % 1000000 + sVal = timestamp / 1000000 + nsVal = decimal * 1000000 + } + + date_time := time.Unix(sVal, nsVal) + + //const base_format = "2006-01-02 15:04:05" + str_time := date_time.Format(timeFormat) + + return (str_time + "." + strconv.Itoa(int(decimal))) +} diff --git a/src/connector/go/src/taosSql/rows.go b/src/connector/go/src/taosSql/rows.go new file mode 100755 index 000000000000..f9177420c8d6 --- /dev/null +++ b/src/connector/go/src/taosSql/rows.go @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +package taosSql +/* +#cgo CFLAGS : -I/usr/local/include/taos/ +#cgo LDFLAGS: -L/usr/local/lib/taos -ltaos +#include +#include +#include +#include +*/ +import "C" + +import ( + "database/sql" + "database/sql/driver" + "io" + "math" + "reflect" +) + +type taosSqlField struct { + tableName string + name string + length uint32 + flags fieldFlag // indicate whether this field can is null + fieldType fieldType + decimals byte + charSet uint8 +} + +type resultSet struct { + columns []taosSqlField + columnNames []string + done bool +} + +type taosSqlRows struct { + mc *taosConn + rs resultSet +} + +type binaryRows struct { + taosSqlRows +} + +type textRows struct { + taosSqlRows +} + +func (rows *taosSqlRows) Columns() []string { + if rows.rs.columnNames != nil { + return rows.rs.columnNames + } + + columns := make([]string, len(rows.rs.columns)) + if rows.mc != nil && rows.mc.cfg.columnsWithAlias { + for i := range columns { + if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." + rows.rs.columns[i].name + } else { + columns[i] = rows.rs.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.rs.columns[i].name + } + } + + rows.rs.columnNames = columns + + return columns +} + +func (rows *taosSqlRows) ColumnTypeDatabaseTypeName(i int) string { + return rows.rs.columns[i].typeDatabaseName() +} + +func (rows *taosSqlRows) ColumnTypeLength(i int) (length int64, ok bool) { + return int64(rows.rs.columns[i].length), true +} + +func (rows *taosSqlRows) ColumnTypeNullable(i int) (nullable, ok bool) { + return rows.rs.columns[i].flags&flagNotNULL == 0, true +} + +func (rows *taosSqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) { + column := rows.rs.columns[i] + decimals := int64(column.decimals) + + switch column.fieldType { + case C.TSDB_DATA_TYPE_FLOAT: + fallthrough + case C.TSDB_DATA_TYPE_DOUBLE: + if decimals == 0x1f { + return math.MaxInt64, math.MaxInt64, true + } + return math.MaxInt64, decimals, true + } + + return 0, 0, false +} + +func (rows *taosSqlRows) ColumnTypeScanType(i int) reflect.Type { + return rows.rs.columns[i].scanType() +} + +func (rows *taosSqlRows) Close() (err error) { + mc := rows.mc + if mc == nil { + return nil + } + + rows.mc = nil + return err +} + +func (rows *taosSqlRows) HasNextResultSet() (b bool) { + if rows.mc == nil { + return false + } + return rows.mc.status&statusMoreResultsExists != 0 +} + +func (rows *taosSqlRows) nextResultSet() (int, error) { + if rows.mc == nil { + return 0, io.EOF + } + + // Remove unread packets from stream + if !rows.rs.done { + rows.rs.done = true + } + + if !rows.HasNextResultSet() { + rows.mc = nil + return 0, io.EOF + } + rows.rs = resultSet{} + return 0,nil +} + +func (rows *taosSqlRows) nextNotEmptyResultSet() (int, error) { + for { + resLen, err := rows.nextResultSet() + if err != nil { + return 0, err + } + + if resLen > 0 { + return resLen, nil + } + + rows.rs.done = true + } +} + +func (rows *binaryRows) NextResultSet() error { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +// stmt.Query return binary rows, and get row from this func +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) NextResultSet() (err error) { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err 
+ } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +// db.Query return text rows, and get row from this func +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (mf *taosSqlField) typeDatabaseName() string { + //fmt.Println("######## (mf *taosSqlField) typeDatabaseName() mf.fieldType:", mf.fieldType) + switch mf.fieldType { + case C.TSDB_DATA_TYPE_BOOL: + return "BOOL" + + case C.TSDB_DATA_TYPE_TINYINT: + return "TINYINT" + + case C.TSDB_DATA_TYPE_SMALLINT: + return "SMALLINT" + + case C.TSDB_DATA_TYPE_INT: + return "INT" + + case C.TSDB_DATA_TYPE_BIGINT: + return "BIGINT" + + case C.TSDB_DATA_TYPE_FLOAT: + return "FLOAT" + + case C.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE" + + case C.TSDB_DATA_TYPE_BINARY: + return "BINARY" + + case C.TSDB_DATA_TYPE_NCHAR: + return "NCHAR" + + case C.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP" + + default: + return "" + } +} + +var ( + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + scanTypeInt16 = reflect.TypeOf(int16(0)) + scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeNullTime = reflect.TypeOf(NullTime{}) + scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) + scanTypeUnknown = reflect.TypeOf(new(interface{})) +) + +func (mf *taosSqlField) scanType() reflect.Type { + //fmt.Println("######## (mf *taosSqlField) scanType() mf.fieldType:", mf.fieldType) + switch mf.fieldType { + case C.TSDB_DATA_TYPE_BOOL: + return scanTypeInt8 + + case C.TSDB_DATA_TYPE_TINYINT: + return scanTypeInt8 + + case C.TSDB_DATA_TYPE_SMALLINT: + return scanTypeInt16 + + case C.TSDB_DATA_TYPE_INT: + return scanTypeInt32 + + case C.TSDB_DATA_TYPE_BIGINT: + return scanTypeInt64 + + case C.TSDB_DATA_TYPE_FLOAT: + return scanTypeFloat32 + + case C.TSDB_DATA_TYPE_DOUBLE: + return scanTypeFloat64 + + case C.TSDB_DATA_TYPE_BINARY: + return scanTypeRawBytes + + case C.TSDB_DATA_TYPE_NCHAR: + return scanTypeRawBytes + + case C.TSDB_DATA_TYPE_TIMESTAMP: + return scanTypeNullTime + + default: + return scanTypeUnknown + } +} diff --git a/src/connector/go/src/taosSql/statement.go b/src/connector/go/src/taosSql/statement.go new file mode 100755 index 000000000000..3aa3e0156455 --- /dev/null +++ b/src/connector/go/src/taosSql/statement.go @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +package taosSql + +import ( + "database/sql/driver" + "fmt" + "reflect" +) + +type taosSqlStmt struct { + mc *taosConn + id uint32 + pSql string + paramCount int +} + +func (stmt *taosSqlStmt) Close() error { + return nil +} + +func (stmt *taosSqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *taosSqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc == nil || stmt.mc.taos == nil { + return nil, errInvalidConn + } + return stmt.mc.Exec(stmt.pSql, args) +} + +func (stmt *taosSqlStmt) Query(args []driver.Value) (driver.Rows, error) { + if stmt.mc == nil || stmt.mc.taos == nil { + return nil, errInvalidConn + } + return stmt.query(args) +} + +func (stmt *taosSqlStmt) query(args []driver.Value) (*binaryRows, error) { + mc := stmt.mc + if mc == nil || mc.taos == nil { + return nil, errInvalidConn + } + + querySql := stmt.pSql + + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(stmt.pSql, args) + if err != nil { + return nil, err + } + querySql = prepared + } + + num_fields, err := mc.taosQuery(querySql) + if err == nil { + // Read Result + rows := new(binaryRows) + rows.mc = mc + // Columns field + rows.rs.columns, err = mc.readColumns(num_fields) + return rows, err + } + return nil, err +} + +type converter struct{} + +// ConvertValue mirrors the reference/default converter in database/sql/driver +// with _one_ exception. We support uint64 with their high bit and the default +// implementation does not. This function should be kept in sync with +// database/sql/driver defaultConverter.ConvertValue() except for that +// deliberate difference. +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + + if driver.IsValue(v) { + return v, nil + } + + if vr, ok := v.(driver.Valuer); ok { + sv, err := callValuerValue(vr) + if err != nil { + return nil, err + } + if !driver.IsValue(sv) { + return nil, fmt.Errorf("non-Value type %T returned from Value", sv) + } + + return sv, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } else { + return c.ConvertValue(rv.Elem().Interface()) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint(), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + case reflect.Bool: + return rv.Bool(), nil + case reflect.Slice: + ek := rv.Type().Elem().Kind() + if ek == reflect.Uint8 { + return rv.Bytes(), nil + } + return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek) + case reflect.String: + return rv.String(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} + +var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// callValuerValue returns vr.Value(), with one exception: +// If vr.Value is an auto-generated method on a pointer type and the +// pointer is nil, it would panic at runtime in the panicwrap +// method. Treat it like nil instead. +// +// This is so people can implement driver.Value on value types and +// still use nil pointers to those types to mean nil/NULL, just like +// string/*string. +// +// This is an exact copy of the same-named unexported function from the +// database/sql package. 
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) { + if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr && + rv.IsNil() && + rv.Type().Elem().Implements(valuerReflectType) { + return nil, nil + } + return vr.Value() +} diff --git a/src/connector/go/src/taosSql/taosLog.go b/src/connector/go/src/taosSql/taosLog.go new file mode 100755 index 000000000000..63af5e34c157 --- /dev/null +++ b/src/connector/go/src/taosSql/taosLog.go @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package taosSql + +import ( + "bufio" + "errors" + "fmt" + "io" + "log" + "os" + "strings" +) + +// Various errors the driver might return. +var ( + errInvalidConn = errors.New("invalid connection") + errConnNoExist = errors.New("no existent connection ") +) + +var taosLog *log.Logger + +// SetLogger is used to set the logger for critical errors. +// The initial logger +func taosLogInit() { + cfgName := "/etc/taos/taos.cfg" + logNameDefault := "/var/log/taos/taosgo.log" + var logName string + + // get log path from cfg file + cfgFile, err := os.OpenFile(cfgName, os.O_RDONLY, 0644) + defer cfgFile.Close() + if err != nil { + fmt.Println(err) + logName = logNameDefault + } else { + logName, err = getLogNameFromCfg(cfgFile) + if err != nil { + fmt.Println(err) + logName = logNameDefault + } + } + + logFile, err := os.OpenFile(logName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + taosLog = log.New(logFile, "", log.LstdFlags) + taosLog.SetPrefix("TAOS DRIVER ") + taosLog.SetFlags(log.LstdFlags|log.Lshortfile) +} + +func getLogNameFromCfg(f *os.File) (string, error) { + // Create file buf, *Reader + r := bufio.NewReader(f) + for { + //read one line, return to slice b + b, _, err := r.ReadLine() + if err != nil { + if err == io.EOF { + break + } + panic(err) + } + + // Remove space of left and right + s := strings.TrimSpace(string(b)) + if strings.Index(s, "#") == 0 { + // comment line + continue + } + + if len(s) == 0 { + continue + } + + var ns string + // If there is a comment on the right of the line, must be remove + index := strings.Index(s, "#") + if index > 0 { + // Gets the string to the left of the comment to determine whether it is empty + ns = s[:index] + if len(ns) == 0 { + continue + } + } else { + ns = s; + } + + ss := strings.Fields(ns) + if strings.Compare("logDir", ss[0]) != 0 { + continue + } + + if len(ss) < 2 { + break + } + + // Add a filename after the path + logName := ss[1] + "/taosgo.log" + return logName,nil + } + + return "", errors.New("no config log path, use default") +} + diff --git a/src/connector/go/src/taosSql/taosSqlCgo.go b/src/connector/go/src/taosSql/taosSqlCgo.go new file mode 100755 index 000000000000..8e6d9e7cc42a --- /dev/null +++ b/src/connector/go/src/taosSql/taosSqlCgo.go @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +package taosSql + +/* +#cgo CFLAGS : -I/usr/local/include/taos/ +#cgo LDFLAGS: -L/usr/local/lib/taos -ltaos +#include +#include +#include +#include +*/ +import "C" + +import ( + "errors" + "unsafe" +) + +func (mc *taosConn) taosConnect(ip, user, pass, db string, port int) (taos unsafe.Pointer, err error){ + cuser := C.CString(user) + cpass := C.CString(pass) + cip := C.CString(ip) // TODO: Addr : x.x.x.x:port, must process to ip and port format + cdb := C.CString("") + port = 0 + defer C.free(unsafe.Pointer(cip)) + defer C.free(unsafe.Pointer(cuser)) + defer C.free(unsafe.Pointer(cpass)) + defer C.free(unsafe.Pointer(cdb)) + + taosObj := C.taos_connect(cip, cuser, cpass, cdb, (C.int)(port)) + if taosObj == nil { + return nil, errors.New("taos_connect() fail!") + } + + return (unsafe.Pointer)(taosObj), nil +} + +func (mc *taosConn) taosQuery(sqlstr string) (int, error) { + taosLog.Printf("taosQuery() input sql:%s\n", sqlstr) + + csqlstr := C.CString(sqlstr) + defer C.free(unsafe.Pointer(csqlstr)) + code := int(C.taos_query(mc.taos, csqlstr)) + + if 0 != code { + mc.taos_error() + errStr := C.GoString(C.taos_errstr(mc.taos)) + taosLog.Println("taos_query() failed:", errStr) + return 0, errors.New(errStr) + } + + // read result and save into mc struct + num_fields := int(C.taos_field_count(mc.taos)) + if 0 == num_fields { // there are no select and show kinds of commands + mc.affectedRows = int(C.taos_affected_rows(mc.taos)) + mc.insertId = 0 + } + + return num_fields, nil +} + +func (mc *taosConn) taos_close() { + C.taos_close(mc.taos) +} + +func (mc *taosConn) taos_error() { + // free local resouce: allocated memory/metric-meta refcnt + //var pRes unsafe.Pointer + pRes := C.taos_use_result(mc.taos) + C.taos_free_result(pRes) +} diff --git a/src/connector/go/src/taosSql/utils.go b/src/connector/go/src/taosSql/utils.go new file mode 100755 index 000000000000..a5a90059b50f --- /dev/null +++ b/src/connector/go/src/taosSql/utils.go @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +package taosSql + +import ( + "database/sql/driver" + "errors" + "fmt" + "sync/atomic" + "time" +) + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. +func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("invalid time string: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. +var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... 
+ // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. +func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + //case '\'': + // buf[pos] = '\\' + // buf[pos+1] = '\'' + // pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. 
+func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +/****************************************************************************** +* Sync utils * +******************************************************************************/ + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://github.com/golang/go/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} + +// atomicBool is a wrapper around uint32 for usage as a boolean value with +// atomic access. +type atomicBool struct { + _noCopy noCopy + value uint32 +} + +// IsSet returns whether the current boolean value is true +func (ab *atomicBool) IsSet() bool { + return atomic.LoadUint32(&ab.value) > 0 +} + +// Set sets the value of the bool regardless of the previous value +func (ab *atomicBool) Set(value bool) { + if value { + atomic.StoreUint32(&ab.value, 1) + } else { + atomic.StoreUint32(&ab.value, 0) + } +} + +// TrySet sets the value of the bool and returns whether the value changed +func (ab *atomicBool) TrySet(value bool) bool { + if value { + return atomic.SwapUint32(&ab.value, 1) == 0 + } + return atomic.SwapUint32(&ab.value, 0) > 0 +} + +// atomicError is a wrapper for atomically accessed error values +type atomicError struct { + _noCopy noCopy + value atomic.Value +} + +// Set sets the error value regardless of the previous value. +// The value must not be nil +func (ae *atomicError) Set(value error) { + ae.value.Store(value) +} + +// Value returns the current error value +func (ae *atomicError) Value() error { + if v := ae.value.Load(); v != nil { + // this will panic if the value doesn't implement the error interface + return v.(error) + } + return nil +} + +func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) { + dargs := make([]driver.Value, len(named)) + for n, param := range named { + if len(param.Name) > 0 { + // TODO: support the use of Named Parameters #561 + return nil, errors.New("taosSql: driver does not support the use of Named Parameters") + } + dargs[n] = param.Value + } + return dargs, nil +} + + diff --git a/src/connector/grafana/tdengine/README.md b/src/connector/grafana/tdengine/README.md new file mode 100644 index 000000000000..91dc73daf15c --- /dev/null +++ b/src/connector/grafana/tdengine/README.md @@ -0,0 +1,72 @@ +TDengine Datasource - build by Taosdata Inc. www.taosdata.com + +TDengine backend server implement 2 urls: + + * `/heartbeat` return 200 ok. Used for "Test connection" on the datasource config page. + * `/query` return data based on input sqls. + +## Installation + +To install this plugin: +Copy the data source you want to /var/lib/grafana/plugins/. Then restart grafana-server. The new data source should now be available in the data source type dropdown in the Add Data Source View. 
+ +``` +cp -r /connector/grafana/tdengine /var/lib/grafana/plugins/ +sudo service grafana-server restart +``` + +### Query API + +Example request +``` javascript +[{ + "refId": "A", + "alias": "taosd-memory", + "sql": "select avg(mem_taosd) from sys.dn where ts > now-5m and ts < now interval(500a)" +}, +{ + "refId": "B", + "alias": "system-memory", + "sql": "select avg(mem_system) from sys.dn where ts > now-5m and ts < now interval(500a)" +}] +``` + +Example response +``` javascript +[{ + "datapoints": [ + [206.488281, 1538137825000], + [206.488281, 1538137855000], + [206.488281, 1538137885500], + [210.609375, 1538137915500], + [210.867188, 1538137945500] + ], + "refId": "A", + "target": "taosd-memory" +}, +{ + "datapoints": [ + [2910.218750, 1538137825000], + [2912.265625, 1538137855000], + [2912.437500, 1538137885500], + [2916.644531, 1538137915500], + [2917.066406, 1538137945500] + ], + "refId": "B", + "target": "system-memory" +}] +``` + +### Heartbeat API + +Example request +``` javascript + get request +``` + +Example response +``` javascript +{ + "message": "Grafana server receive a quest from you!" +} +``` diff --git a/src/connector/grafana/tdengine/css/query-editor.css b/src/connector/grafana/tdengine/css/query-editor.css new file mode 100644 index 000000000000..3b678b9f3689 --- /dev/null +++ b/src/connector/grafana/tdengine/css/query-editor.css @@ -0,0 +1,3 @@ +.generic-datasource-query-row .query-keyword { + width: 75px; +} \ No newline at end of file diff --git a/src/connector/grafana/tdengine/datasource.js b/src/connector/grafana/tdengine/datasource.js new file mode 100644 index 000000000000..14eb8a9b3604 --- /dev/null +++ b/src/connector/grafana/tdengine/datasource.js @@ -0,0 +1,170 @@ +'use strict'; + +System.register(['lodash'], function (_export, _context) { + "use strict"; + var _, _createClass, GenericDatasource; + + function strTrim(str) { + return str.replace(/^\s+|\s+$/gm,''); + } + + function _classCallCheck(instance, Constructor) { + if (!(instance instanceof Constructor)) { + throw new TypeError("Cannot call a class as a function"); + } + } + + return { + setters: [function (_lodash) { + _ = _lodash.default; + }], + execute: function () { + _createClass = function () { + function defineProperties(target, props) { + for (var i = 0; i < props.length; i++) { + var descriptor = props[i]; + descriptor.enumerable = descriptor.enumerable || false; + descriptor.configurable = true; + if ("value" in descriptor) descriptor.writable = true; + Object.defineProperty(target, descriptor.key, descriptor); + } + } + + return function (Constructor, protoProps, staticProps) { + if (protoProps) defineProperties(Constructor.prototype, protoProps); + if (staticProps) defineProperties(Constructor, staticProps); + return Constructor; + }; + }(); + + _export('GenericDatasource', GenericDatasource = function () { + function GenericDatasource(instanceSettings, $q, backendSrv, templateSrv) { + _classCallCheck(this, GenericDatasource); + + this.type = instanceSettings.type; + this.url = instanceSettings.url; + this.name = instanceSettings.name; + this.q = $q; + this.backendSrv = backendSrv; + this.templateSrv = templateSrv; + //this.withCredentials = instanceSettings.withCredentials; + this.headers = { 'Content-Type': 'application/json' }; + var taosuser = instanceSettings.jsonData.user; + var taospwd = instanceSettings.jsonData.password; + if (taosuser == null || taosuser == undefined || taosuser == "") { + taosuser = "root"; + } + if (taospwd == null || taospwd == undefined || taospwd == "") 
{ + taospwd = "taosdata"; + } + + this.headers.Authorization = "Basic " + this.encode(taosuser + ":" + taospwd); + } + + _createClass(GenericDatasource, [{ + key: 'encode', + value: function encode(input) { + var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; + var output = ""; + var chr1, chr2, chr3, enc1, enc2, enc3, enc4; + var i = 0; + while (i < input.length) { + chr1 = input.charCodeAt(i++); + chr2 = input.charCodeAt(i++); + chr3 = input.charCodeAt(i++); + enc1 = chr1 >> 2; + enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); + enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); + enc4 = chr3 & 63; + if (isNaN(chr2)) { + enc3 = enc4 = 64; + } else if (isNaN(chr3)) { + enc4 = 64; + } + output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4); + } + + return output; + } + }, { + key: 'generateSql', + value: function generateSql(sql, queryStart, queryEnd, intervalMs) { + if (queryStart == undefined || queryStart == null) { + queryStart = "now-1h"; + } + if (queryEnd == undefined || queryEnd == null) { + queryEnd = "now"; + } + if (intervalMs == undefined || intervalMs == null) { + intervalMs = "20000"; + } + + intervalMs += "a"; + sql = sql.replace(/^\s+|\s+$/gm, ''); + sql = sql.replace("$from", "'" + queryStart + "'"); + sql = sql.replace("$begin", "'" + queryStart + "'"); + sql = sql.replace("$to", "'" + queryEnd + "'"); + sql = sql.replace("$end", "'" + queryEnd + "'"); + sql = sql.replace("$interval", intervalMs); + + return sql; + } + }, { + key: 'query', + value: function query(options) { + var querys = new Array; + for (var i = 0; i < options.targets.length; ++i) { + var query = new Object; + + query.refId = options.targets[i].refId; + query.alias = options.targets[i].alias; + if (query.alias == null || query.alias == undefined) { + query.alias = ""; + } + + //query.sql = this.generateSql(options.targets[i].sql, options.range.raw.from, options.range.raw.to, options.intervalMs); + query.sql = this.generateSql(options.targets[i].sql, options.range.from.toISOString(), options.range.to.toISOString(), options.intervalMs); + console.log(query.sql); + + querys.push(query); + } + + if (querys.length <= 0) { + return this.q.when({ data: [] }); + } + + return this.doRequest({ + url: this.url + '/grafana/query', + data: querys, + method: 'POST' + }); + } + }, { + key: 'testDatasource', + value: function testDatasource() { + return this.doRequest({ + url: this.url + '/grafana/heartbeat', + method: 'GET' + }).then(function (response) { + if (response.status === 200) { + return { status: "success", message: "TDengine Data source is working", title: "Success" }; + } + }); + } + }, { + key: 'doRequest', + value: function doRequest(options) { + options.headers = this.headers; + //console.log(options); + return this.backendSrv.datasourceRequest(options); + } + }]); + + return GenericDatasource; + }()); + + _export('GenericDatasource', GenericDatasource); + } + }; +}); +//# sourceMappingURL=datasource.js.map diff --git a/src/connector/grafana/tdengine/img/taosdata_logo.png b/src/connector/grafana/tdengine/img/taosdata_logo.png new file mode 100644 index 000000000000..88d3bacd0959 Binary files /dev/null and b/src/connector/grafana/tdengine/img/taosdata_logo.png differ diff --git a/src/connector/grafana/tdengine/module.js b/src/connector/grafana/tdengine/module.js new file mode 100644 index 000000000000..8592cf237556 --- /dev/null +++ b/src/connector/grafana/tdengine/module.js @@ -0,0 +1,51 @@ +'use strict'; + 
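+// module.js wires the plugin into Grafana: it registers the datasource class, the query
+// controller, and the config/query-options/annotations partials via the _export calls below.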
+System.register(['./datasource', './query_ctrl'], function (_export, _context) { + "use strict"; + + var GenericDatasource, GenericDatasourceQueryCtrl, GenericConfigCtrl, GenericQueryOptionsCtrl, GenericAnnotationsQueryCtrl; + + function _classCallCheck(instance, Constructor) { + if (!(instance instanceof Constructor)) { + throw new TypeError("Cannot call a class as a function"); + } + } + + return { + setters: [function (_datasource) { + GenericDatasource = _datasource.GenericDatasource; + }, function (_query_ctrl) { + GenericDatasourceQueryCtrl = _query_ctrl.GenericDatasourceQueryCtrl; + }], + execute: function () { + _export('ConfigCtrl', GenericConfigCtrl = function GenericConfigCtrl() { + _classCallCheck(this, GenericConfigCtrl); + }); + + GenericConfigCtrl.templateUrl = 'partials/config.html'; + + _export('QueryOptionsCtrl', GenericQueryOptionsCtrl = function GenericQueryOptionsCtrl() { + _classCallCheck(this, GenericQueryOptionsCtrl); + }); + + GenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html'; + + _export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl = function GenericAnnotationsQueryCtrl() { + _classCallCheck(this, GenericAnnotationsQueryCtrl); + }); + + GenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html'; + + _export('Datasource', GenericDatasource); + + _export('QueryCtrl', GenericDatasourceQueryCtrl); + + _export('ConfigCtrl', GenericConfigCtrl); + + _export('QueryOptionsCtrl', GenericQueryOptionsCtrl); + + _export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl); + } + }; +}); +//# sourceMappingURL=module.js.map diff --git a/src/connector/grafana/tdengine/partials/config.html b/src/connector/grafana/tdengine/partials/config.html new file mode 100644 index 000000000000..801a75327188 --- /dev/null +++ b/src/connector/grafana/tdengine/partials/config.html @@ -0,0 +1,19 @@ +
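+<!-- Datasource settings form: TDengine host address plus optional user and password;
+     datasource.js falls back to root/taosdata when these fields are left empty. -->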

TDengine Connection

+ +
+
+ Host + +
+ +
+
+ User + +
+
+ Password + +
+
+
\ No newline at end of file diff --git a/src/connector/grafana/tdengine/partials/query.editor.html b/src/connector/grafana/tdengine/partials/query.editor.html new file mode 100644 index 000000000000..4f16dc2aa93d --- /dev/null +++ b/src/connector/grafana/tdengine/partials/query.editor.html @@ -0,0 +1,58 @@ + + +
+
+ + +
+
+ +
+
+
+ + +
+
+
+ +
+
+ +
+
+ +
+
{{ctrl.lastGenerateSQL}}
+
+ +
+
Use any SQL that can return a result set, such as:
+- [[timestamp1, value1], [timestamp2, value2], ... ]
+
+Macros:
+- $from -> start timestamp of panel
+- $to -> stop timestamp of panel
+- $interval -> interval of panel
+
+Example of SQL:
+  SELECT count(*)
+  FROM db.table
+  WHERE ts > $from and ts < $to
+  INTERVAL ($interval)
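+
+At query time datasource.js replaces the macros before the SQL is sent, so with the
+default 20000ms interval (and illustrative panel timestamps) the example above is
+rewritten roughly as:
+  SELECT count(*)
+  FROM db.table
+  WHERE ts > '2019-07-01T01:00:00.000Z' and ts < '2019-07-01T02:00:00.000Z'
+  INTERVAL (20000a)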
+    
+
+ +
+
{{ctrl.lastQueryError}}
+
+ +
diff --git a/src/connector/grafana/tdengine/plugin.json b/src/connector/grafana/tdengine/plugin.json new file mode 100644 index 000000000000..6093703b700e --- /dev/null +++ b/src/connector/grafana/tdengine/plugin.json @@ -0,0 +1,32 @@ +{ + "name": "TDengine", + "id": "tdengine", + "type": "datasource", + + "partials": { + "config": "partials/config.html" + }, + + "metrics": true, + "annotations": false, + "alerting": true, + + "info": { + "description": "TDengine datasource", + "author": { + "name": "Taosdata Inc.", + "url": "https://www.taosdata.com" + }, + "logos": { + "small": "img/taosdata_logo.png", + "large": "img/taosdata_logo.png" + }, + "version": "1.6.0", + "updated": "2019-07-01" + }, + + "dependencies": { + "grafanaVersion": "5.2.4", + "plugins": [ ] + } +} diff --git a/src/connector/grafana/tdengine/query_ctrl.js b/src/connector/grafana/tdengine/query_ctrl.js new file mode 100644 index 000000000000..fc9737238f1b --- /dev/null +++ b/src/connector/grafana/tdengine/query_ctrl.js @@ -0,0 +1,91 @@ +'use strict'; + +System.register(['app/plugins/sdk'], function (_export, _context) { + "use strict"; + + var QueryCtrl, _createClass, GenericDatasourceQueryCtrl; + + function _classCallCheck(instance, Constructor) { + if (!(instance instanceof Constructor)) { + throw new TypeError("Cannot call a class as a function"); + } + } + + function _possibleConstructorReturn(self, call) { + if (!self) { + throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); + } + + return call && (typeof call === "object" || typeof call === "function") ? call : self; + } + + function _inherits(subClass, superClass) { + if (typeof superClass !== "function" && superClass !== null) { + throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); + } + + subClass.prototype = Object.create(superClass && superClass.prototype, { + constructor: { + value: subClass, + enumerable: false, + writable: true, + configurable: true + } + }); + if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; + } + + return { + setters: [function (_appPluginsSdk) { + QueryCtrl = _appPluginsSdk.QueryCtrl; + }, function (_cssQueryEditorCss) {}], + execute: function () { + _createClass = function () { + function defineProperties(target, props) { + for (var i = 0; i < props.length; i++) { + var descriptor = props[i]; + descriptor.enumerable = descriptor.enumerable || false; + descriptor.configurable = true; + if ("value" in descriptor) descriptor.writable = true; + Object.defineProperty(target, descriptor.key, descriptor); + } + } + + return function (Constructor, protoProps, staticProps) { + if (protoProps) defineProperties(Constructor.prototype, protoProps); + if (staticProps) defineProperties(Constructor, staticProps); + return Constructor; + }; + }(); + + _export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl = function (_QueryCtrl) { + _inherits(GenericDatasourceQueryCtrl, _QueryCtrl); + + function GenericDatasourceQueryCtrl($scope, $injector) { + _classCallCheck(this, GenericDatasourceQueryCtrl); + + var _this = _possibleConstructorReturn(this, (GenericDatasourceQueryCtrl.__proto__ || Object.getPrototypeOf(GenericDatasourceQueryCtrl)).call(this, $scope, $injector)); + + _this.scope = $scope; + return _this; + } + + _createClass(GenericDatasourceQueryCtrl, [{ + key: 'generateSQL', + value: function generateSQL(query) { + //this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.raw.from, this.panelCtrl.range.raw.to, this.panelCtrl.intervalMs); + this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.from.toISOString(), this.panelCtrl.range.to.toISOString(), this.panelCtrl.intervalMs); + this.showGenerateSQL = !this.showGenerateSQL; + } + }]); + + return GenericDatasourceQueryCtrl; + }(QueryCtrl)); + + _export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl); + + GenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html'; + } + }; +}); +//# sourceMappingURL=query_ctrl.js.map diff --git a/src/connector/jdbc/.settings/org.eclipse.core.resources.prefs b/src/connector/jdbc/.settings/org.eclipse.core.resources.prefs new file mode 100755 index 000000000000..1935ba2ee400 --- /dev/null +++ b/src/connector/jdbc/.settings/org.eclipse.core.resources.prefs @@ -0,0 +1,5 @@ +eclipse.preferences.version=1 +encoding//src/main/java=GBK +encoding//src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java=UTF-8 +encoding//src/main/resources=UTF-8 +encoding/=UTF-8 diff --git a/src/connector/jdbc/.settings/org.eclipse.jdt.core.prefs b/src/connector/jdbc/.settings/org.eclipse.jdt.core.prefs new file mode 100755 index 000000000000..5ce451889942 --- /dev/null +++ b/src/connector/jdbc/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,13 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled +org.eclipse.jdt.core.compiler.codegen.methodParameters=do not generate +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6 +org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve +org.eclipse.jdt.core.compiler.compliance=1.6 +org.eclipse.jdt.core.compiler.debug.lineNumber=generate +org.eclipse.jdt.core.compiler.debug.localVariable=generate +org.eclipse.jdt.core.compiler.debug.sourceFile=generate +org.eclipse.jdt.core.compiler.problem.assertIdentifier=error +org.eclipse.jdt.core.compiler.problem.enumIdentifier=error +org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning 
+org.eclipse.jdt.core.compiler.source=1.6 diff --git a/src/connector/jdbc/.settings/org.eclipse.m2e.core.prefs b/src/connector/jdbc/.settings/org.eclipse.m2e.core.prefs new file mode 100755 index 000000000000..f897a7f1cb23 --- /dev/null +++ b/src/connector/jdbc/.settings/org.eclipse.m2e.core.prefs @@ -0,0 +1,4 @@ +activeProfiles= +eclipse.preferences.version=1 +resolveWorkspaceProjects=true +version=1 diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt new file mode 100644 index 000000000000..e79584050375 --- /dev/null +++ b/src/connector/jdbc/CMakeLists.txt @@ -0,0 +1,22 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +SET(JDBC_CMD_NAME "jdbc_cmd") +SET(JDBC_TARGET_NAME "jdbc_target") + +FIND_PATH(MVN_INSTALLED mvn /usr/bin/) +IF (MVN_INSTALLED) + MESSAGE(STATUS "MVN is installed and JDBC will be compiled") + ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} + POST_BUILD + COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver*dist.* ${EXECUTABLE_OUTPUT_PATH}/../lib/ + COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml + COMMENT "build jdbc driver") + ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) +ELSE () + MESSAGE(STATUS "MVN is not installed and JDBC is not compiled") +ENDIF () + + diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml new file mode 100755 index 000000000000..05989178c7c4 --- /dev/null +++ b/src/connector/jdbc/pom.xml @@ -0,0 +1,79 @@ + + 4.0.0 + com.taosdata.jdbc + taos-jdbcdriver + 1.0.0 + JDBCDriver + TDengine JDBC Driver + + UTF-8 + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.0.0 + + + + src/main/assembly/assembly-jar.xml + + + + + make-assembly + package + + single + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 2.3.2 + + GBK + 8 + 8 + true + + + + org.apache.maven.plugins + maven-source-plugin + 2.1.2 + + + attach-sources + + jar + + + + + + + + + commons-logging + commons-logging + 1.1.2 + + + * + * + + + + + org.apache.commons + commons-lang3 + 3.5 + + + diff --git a/src/connector/jdbc/src/main/assembly/assembly-jar.xml b/src/connector/jdbc/src/main/assembly/assembly-jar.xml new file mode 100644 index 000000000000..23ba5529f9b9 --- /dev/null +++ b/src/connector/jdbc/src/main/assembly/assembly-jar.xml @@ -0,0 +1,18 @@ + + + dist + + jar + + false + + + / + true + true + runtime + + + \ No newline at end of file diff --git a/src/connector/jdbc/src/main/assembly/assembly.xml b/src/connector/jdbc/src/main/assembly/assembly.xml new file mode 100755 index 000000000000..bdeaacfb6e21 --- /dev/null +++ b/src/connector/jdbc/src/main/assembly/assembly.xml @@ -0,0 +1,24 @@ + + dist + + tar.gz + + + + true + / + + + + \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/CatalogResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/CatalogResultSet.java new file mode 100644 index 000000000000..3a01e2e09297 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/CatalogResultSet.java @@ -0,0 +1,68 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.ResultSet; +import java.sql.SQLException; + +/* + * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * standard JDBC API contains more or less some adjustments customized for certain + * compatibility needs. + */ +public class CatalogResultSet extends TSDBResultSetWrapper { + + + public CatalogResultSet(ResultSet resultSet) { + super.setOriginalResultSet(resultSet); + } + + @Override + public String getString(int columnIndex) throws SQLException { + if (columnIndex <= 1) { + return super.getString(columnIndex); + } else { + return null; + } + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + if (columnIndex <= 1) { + return super.getBoolean(columnIndex); + } else { + return false; + } + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + if (columnIndex <= 1) { + return super.getBytes(columnIndex); + } else { + return null; + } + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + if (columnIndex <= 1) { + return super.getObject(columnIndex); + } else { + return null; + } + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java new file mode 100644 index 000000000000..5c7f80c715c1 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/ColumnMetaData.java @@ -0,0 +1,55 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ *****************************************************************************/ +package com.taosdata.jdbc; + +public class ColumnMetaData { + + int colType = 0; + String colName = null; + int colSize = -1; + int colIndex = 0; + + public int getColSize() { + return colSize; + } + + public void setColSize(int colSize) { + this.colSize = colSize; + } + + public int getColType() { + return colType; + } + + public void setColType(int colType) { + this.colType = colType; + } + + public String getColName() { + return colName; + } + + public void setColName(String colName) { + this.colName = colName; + } + + public int getColIndex() { + return colIndex; + } + + public void setColIndex(int colIndex) { + this.colIndex = colIndex; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java new file mode 100644 index 000000000000..027d2197a32a --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java @@ -0,0 +1,1057 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.sql.Date; +import java.util.*; + +/* + * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * standard JDBC API contains more or less some adjustments customized for certain + * compatibility needs. 
+ */ +public class DatabaseMetaDataResultSet implements ResultSet { + + private List columnMetaDataList; + private List rowDataList; + private TSDBResultSetRowData rowCursor; + + // position of cursor, starts from 0 as beforeFirst, increases as next() is called + private int cursorRowNumber = 0; + + public DatabaseMetaDataResultSet() { + rowDataList = new ArrayList(); + columnMetaDataList = new ArrayList(); + } + + public List getRowDataList() { + return rowDataList; + } + + public void setRowDataList(List rowDataList) { + this.rowDataList = rowDataList; + } + + public List getColumnMetaDataList() { + return columnMetaDataList; + } + + public void setColumnMetaDataList(List columnMetaDataList) { + this.columnMetaDataList = columnMetaDataList; + } + + public TSDBResultSetRowData getRowCursor() { + return rowCursor; + } + + public void setRowCursor(TSDBResultSetRowData rowCursor) { + this.rowCursor = rowCursor; + } + + @Override + public boolean next() throws SQLException { + boolean ret = false; + if (rowDataList.size() > 0) { + ret = rowDataList.iterator().hasNext(); + if (ret) { + rowCursor = rowDataList.iterator().next(); + cursorRowNumber++; + } + } + return ret; + } + + @Override + public void close() throws SQLException { + + } + + @Override + public boolean wasNull() throws SQLException { + return false; + } + + @Override + public String getString(int columnIndex) throws SQLException { + columnIndex--; + return rowCursor.getString(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + columnIndex--; + return rowCursor.getBoolean(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + return (byte) rowCursor.getInt(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public short getShort(int columnIndex) throws SQLException { + return (short) rowCursor.getInt(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public int getInt(int columnIndex) throws SQLException { + return rowCursor.getInt(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public long getLong(int columnIndex) throws SQLException { + return rowCursor.getLong(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + return rowCursor.getFloat(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + return rowCursor.getDouble(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + return new BigDecimal(rowCursor.getDouble(columnIndex, columnMetaDataList.get(columnIndex).getColType())); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + return (rowCursor.getString(columnIndex, columnMetaDataList.get(columnIndex).getColType())).getBytes(); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { 
+ return rowCursor.getTimestamp(columnIndex); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(findColumn(columnLabel)); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return getBoolean(findColumn(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return getByte(findColumn(columnLabel)); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return getShort(findColumn(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return getInt(findColumn(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return getLong(findColumn(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return getFloat(findColumn(columnLabel)); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return getDouble(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return getBytes(findColumn(columnLabel)); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return getDate(findColumn(columnLabel)); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return getTime(findColumn(columnLabel)); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return getTimestamp(findColumn(columnLabel)); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void clearWarnings() throws SQLException { + + } + + @Override + public String getCursorName() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return new TSDBResultSetMetaData(this.columnMetaDataList); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + return rowCursor.get(columnIndex); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return rowCursor.get(findColumn(columnLabel)); + } + + @Override + public int 
findColumn(String columnLabel) throws SQLException { + Iterator colMetaDataIt = this.columnMetaDataList.iterator(); + while (colMetaDataIt.hasNext()) { + ColumnMetaData colMetaData = colMetaDataIt.next(); + if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) { + return colMetaData.getColIndex() + 1; + } + } + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + return new BigDecimal(rowCursor.getDouble(columnIndex, columnMetaDataList.get(columnIndex).getColType())); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return getBigDecimal(findColumn(columnLabel)); + } + + @Override + public boolean isBeforeFirst() throws SQLException { + return cursorRowNumber == 0; + } + + @Override + public boolean isAfterLast() throws SQLException { + return rowDataList.iterator().hasNext(); + } + + @Override + public boolean isFirst() throws SQLException { + return cursorRowNumber == 1; + } + + @Override + public boolean isLast() throws SQLException { + return cursorRowNumber == rowDataList.size(); + } + + @Override + public void beforeFirst() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void afterLast() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public boolean first() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public boolean last() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public int getRow() throws SQLException { + if (cursorRowNumber > 0 && cursorRowNumber <= rowDataList.size()) { + return cursorRowNumber; + } else { + return 0; + } + + } + + @Override + public boolean absolute(int row) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public boolean relative(int rows) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public boolean previous() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + + } + + @Override + public int getFetchDirection() throws SQLException { + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + + } + + @Override + public int getFetchSize() throws SQLException { + return 0; + } + + @Override + public int getType() throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public int getConcurrency() throws SQLException { + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public boolean rowUpdated() throws SQLException { + return false; + } + + @Override + public boolean rowInserted() throws SQLException { + return false; + } + + @Override + public boolean rowDeleted() throws SQLException { + return false; + } + + @Override + public void 
updateNull(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + 
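+    // The label-based update methods below behave like their index-based counterparts above:
+    // this metadata result set is read-only, so each of them throws SQLException.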
public void updateByte(String columnLabel, byte x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void insertRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void updateRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void deleteRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void refreshRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void cancelRowUpdates() 
throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void moveToInsertRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void moveToCurrentRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public Statement getStatement() throws SQLException { + return null; + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + return null; + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + return null; + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + return null; + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + return null; + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + return null; + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + return null; + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + return null; + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + return null; + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + return null; + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return null; + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + return null; + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return null; + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + return null; + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return null; + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + return null; + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + return null; + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws 
SQLException { + + } + + @Override + public int getHoldability() throws SQLException { + return 0; + } + + @Override + public boolean isClosed() throws SQLException { + return false; + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + return null; + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + return null; + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + return null; + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + + } + + @Override + public String getNString(int columnIndex) throws SQLException { + return null; + } + + @Override + public String getNString(String columnLabel) throws SQLException { + return null; + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + return null; + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public void 
updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public T unwrap(Class iface) throws SQLException { + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } + + private int getTrueColumnIndex(int columnIndex) throws SQLException { + if (columnIndex < 1) { + throw new SQLException("Column Index out of range, " + columnIndex + " < " + 1); + } + + int numOfCols = this.columnMetaDataList.size(); + if (columnIndex > numOfCols) { + throw new SQLException("Column Index out of range, " + columnIndex + " > " + numOfCols); + } + + return columnIndex - 1; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java new file mode 100644 index 000000000000..b6587b942de3 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java @@ -0,0 +1,986 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ *****************************************************************************/ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.Calendar; +import java.util.Map; + +/* + * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * standard JDBC API contains more or less some adjustments customized for certain + * compatibility needs. + */ +public class EmptyResultSet implements ResultSet { + + @Override + public boolean next() throws SQLException { + return false; + } + + @Override + public void close() throws SQLException { + + } + + @Override + public boolean wasNull() throws SQLException { + return false; + } + + @Override + public String getString(int columnIndex) throws SQLException { + return null; + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + return false; + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + return 0; + } + + @Override + public short getShort(int columnIndex) throws SQLException { + return 0; + } + + @Override + public int getInt(int columnIndex) throws SQLException { + return 0; + } + + @Override + public long getLong(int columnIndex) throws SQLException { + return 0; + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + return 0; + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + return 0; + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + return null; + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + return new byte[0]; + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + return null; + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + return null; + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + return null; + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + return null; + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + return null; + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + return null; + } + + @Override + public String getString(String columnLabel) throws SQLException { + return null; + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return false; + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return 0; + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return 0; + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return 0; + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return 0; + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return 0; + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return 0; + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return null; + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return new byte[0]; + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return null; + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return 
null; + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return null; + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return null; + } + + @Override + public void clearWarnings() throws SQLException { + + } + + @Override + public String getCursorName() throws SQLException { + return null; + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return null; + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + return null; + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return null; + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + return 0; + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + return null; + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + return null; + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return null; + } + + @Override + public boolean isBeforeFirst() throws SQLException { + return false; + } + + @Override + public boolean isAfterLast() throws SQLException { + return false; + } + + @Override + public boolean isFirst() throws SQLException { + return false; + } + + @Override + public boolean isLast() throws SQLException { + return false; + } + + @Override + public void beforeFirst() throws SQLException { + + } + + @Override + public void afterLast() throws SQLException { + + } + + @Override + public boolean first() throws SQLException { + return false; + } + + @Override + public boolean last() throws SQLException { + return false; + } + + @Override + public int getRow() throws SQLException { + return 0; + } + + @Override + public boolean absolute(int row) throws SQLException { + return false; + } + + @Override + public boolean relative(int rows) throws SQLException { + return false; + } + + @Override + public boolean previous() throws SQLException { + return false; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + + } + + @Override + public int getFetchDirection() throws SQLException { + return 0; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + + } + + @Override + public int getFetchSize() throws SQLException { + return 0; + } + + @Override + public int getType() throws SQLException { + return 0; + } + + @Override + public int getConcurrency() throws SQLException { + return 0; + } + + @Override + public boolean rowUpdated() throws SQLException { + return false; + } + + @Override + public boolean rowInserted() throws SQLException { + return false; + } + + @Override + public boolean rowDeleted() throws SQLException { + return false; + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + + } + + @Override + public 
void updateShort(int columnIndex, short x) throws SQLException { + + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + + } + + @Override + 
public void insertRow() throws SQLException { + + } + + @Override + public void updateRow() throws SQLException { + + } + + @Override + public void deleteRow() throws SQLException { + + } + + @Override + public void refreshRow() throws SQLException { + + } + + @Override + public void cancelRowUpdates() throws SQLException { + + } + + @Override + public void moveToInsertRow() throws SQLException { + + } + + @Override + public void moveToCurrentRow() throws SQLException { + + } + + @Override + public Statement getStatement() throws SQLException { + return null; + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + return null; + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + return null; + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + return null; + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + return null; + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + return null; + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + return null; + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + return null; + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + return null; + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + return null; + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + return null; + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + return null; + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return null; + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + return null; + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return null; + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + return null; + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return null; + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + return null; + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + return null; + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + + } + + 
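+    // The remaining overrides keep to the same pattern: getters return neutral defaults
+    // (null, 0 or false) and update methods are deliberate no-ops, so the instance always
+    // behaves as an empty, read-only result set.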
@Override + public int getHoldability() throws SQLException { + return 0; + } + + @Override + public boolean isClosed() throws SQLException { + return false; + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + return null; + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + return null; + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + return null; + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + + } + + @Override + public String getNString(int columnIndex) throws SQLException { + return null; + } + + @Override + public String getNString(String columnLabel) throws SQLException { + return null; + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + return null; + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + return null; + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public void updateAsciiStream(int columnIndex, 
InputStream x) throws SQLException { + + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + return null; + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + return null; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetColumnsResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetColumnsResultSet.java new file mode 100644 index 000000000000..e15415e03794 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetColumnsResultSet.java @@ -0,0 +1,51 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.ResultSet; + +/* + * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * standard JDBC API contains more or less some adjustments customized for certain + * compatibility needs. 
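+ *
+ * GetColumnsResultSet is intended to back DatabaseMetaData.getColumns(): it wraps the raw
+ * result set and remaps two of the standard columns onto the arguments supplied by the
+ * caller, so column 1 (TABLE_CAT) resolves to the requested catalog and column 3
+ * (TABLE_NAME) to the table-name pattern, while every other column currently resolves to
+ * null.
+ *
+ * A minimal usage sketch, assuming an open Connection named conn; the database and table
+ * names are illustrative only:
+ *
+ *   DatabaseMetaData meta = conn.getMetaData();
+ *   ResultSet columns = meta.getColumns("log", null, "meters", null);
+ *   while (columns.next()) {
+ *       System.out.println(columns.getString(1) + "." + columns.getString(3));
+ *   }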
+ */ +public class GetColumnsResultSet extends TSDBResultSetWrapper { + private String catalog; + private String schemaPattern; + private String tableNamePattern; + private String columnNamePattern; + + public GetColumnsResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) { + super.setOriginalResultSet(resultSet); + this.catalog = catalog; + this.schemaPattern = schemaPattern; + this.tableNamePattern = tableNamePattern; + this.columnNamePattern = columnNamePattern; + } + + @Override + public String getString(int columnIndex) { + switch (columnIndex) { + case 1: + return catalog; + case 2: + return null; + case 3: + return tableNamePattern; + default: + return null; + } + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetTablesResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetTablesResultSet.java new file mode 100644 index 000000000000..e28f6e3c9adf --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/GetTablesResultSet.java @@ -0,0 +1,53 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.ResultSet; +import java.sql.SQLException; + +/* + * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * standard JDBC API contains more or less some adjustments customized for certain + * compatibility needs. + */ +public class GetTablesResultSet extends TSDBResultSetWrapper { + + private String catalog; + private String schemaPattern; + private String tableNamePattern; + private String[] types; + + public GetTablesResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String[] types) { + super.setOriginalResultSet(resultSet); + this.catalog = catalog; + this.schemaPattern = schemaPattern; + this.tableNamePattern = tableNamePattern; + this.types = types; + } + + @Override + public String getString(int columnIndex) throws SQLException { + String ret = null; + switch (columnIndex) { + case 3: + return super.getString(1); + case 4: + return "table"; + default: + return null; + } + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java new file mode 100644 index 000000000000..de86162b1c5a --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -0,0 +1,341 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.Enumeration; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; + +public class TSDBConnection implements Connection { + + private TSDBJNIConnector connector = null; + + protected Properties props = null; + + private String catalog = null; + + private TSDBDatabaseMetaData dbMetaData = null; + + private Properties clientInfoProps = new Properties(); + + private int timeoutMilliseconds = 0; + + private String tsCharSet = ""; + + public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { + this.dbMetaData = meta; + connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), + Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")), + info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER), + info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); + } + + private void connect(String host, int port, String dbName, String user, String password) throws SQLException { + this.connector = new TSDBJNIConnector(); + this.connector.connect(host, port, dbName, user, password); + + try { + this.setCatalog(dbName); + } catch (SQLException e) { + e.printStackTrace(); + } + + this.dbMetaData.setConnection(this); + } + + public TSDBJNIConnector getConnection() { + return this.connector; + } + + public Statement createStatement() throws SQLException { + if (!this.connector.isClosed()) { + return new TSDBStatement(this.connector); + } else { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } + } + + public PreparedStatement prepareStatement(String sql) throws SQLException { + if (!this.connector.isClosed()) { + return new TSDBPreparedStatement(this.connector, sql); + } else { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } + } + + public CallableStatement prepareCall(String sql) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public String nativeSQL(String sql) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setAutoCommit(boolean autoCommit) throws SQLException { + } + + public boolean getAutoCommit() throws SQLException { + return true; + } + + public void commit() throws SQLException { + } + + public void rollback() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void close() throws SQLException { + if (this.connector != null && !this.connector.isClosed()) { + this.connector.closeConnection(); + } else { + throw new 
SQLException(TSDBConstants.WrapErrMsg("connection is already closed!")); + } + } + + public boolean isClosed() throws SQLException { + return this.connector.isClosed(); + } + + /** + * A connection's database is able to provide information describing its tables, + * its supported SQL grammar, its stored procedures, the capabilities of this + * connection, etc. This information is made available through a + * DatabaseMetaData object. + * + * @return a DatabaseMetaData object for this connection + * @exception SQLException + * if a database access error occurs + */ + public DatabaseMetaData getMetaData() throws SQLException { + return this.dbMetaData; + } + + /** + * This readOnly option is not supported by TDengine. However, the method is intentionally left blank here to + * support HikariCP connection. + * @param readOnly + * @throws SQLException + */ + public void setReadOnly(boolean readOnly) throws SQLException { + } + + public boolean isReadOnly() throws SQLException { + return true; + } + + public void setCatalog(String catalog) throws SQLException { + this.catalog = catalog; + } + + public String getCatalog() throws SQLException { + return this.catalog; + } + + /** + * The transaction isolation level option is not supported by TDengine. + * This method is intentionally left empty to support HikariCP connection. + * @param level + * @throws SQLException + */ + public void setTransactionIsolation(int level) throws SQLException { + } + + public int getTransactionIsolation() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public SQLWarning getWarnings() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void clearWarnings() throws SQLException { + // left blank to support HikariCP connection + //todo: implement getWarnings according to the warning messages returned from TDengine + } + + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + // This method is implemented in the current way to support Spark + if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) { + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + } + + if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) { + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + } + + return this.prepareStatement(sql); + } + + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Map> getTypeMap() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setTypeMap(Map> map) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setHoldability(int holdability) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getHoldability() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Savepoint setSavepoint() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Savepoint setSavepoint(String name) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void rollback(Savepoint savepoint) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return this.prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Clob createClob() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Blob createBlob() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public NClob createNClob() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public SQLXML createSQLXML() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isValid(int timeout) throws SQLException { + return !this.isClosed(); + } + + public void setClientInfo(String name, String value) throws SQLClientInfoException { + clientInfoProps.setProperty(name, value); + } + + public void setClientInfo(Properties properties) throws SQLClientInfoException { + for (Enumeration enumer = properties.keys(); enumer.hasMoreElements();) { + String name = (String) enumer.nextElement(); + clientInfoProps.put(name, properties.getProperty(name)); + } + } + + public String getClientInfo(String name) throws SQLException { + return clientInfoProps.getProperty(name); + } + + public Properties getClientInfo() throws SQLException { + return clientInfoProps; + } + + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setSchema(String schema) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public String getSchema() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void abort(Executor executor) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void 
setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + this.timeoutMilliseconds = milliseconds; + } + + public int getNetworkTimeout() throws SQLException { + return this.timeoutMilliseconds; + } + + public T unwrap(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isWrapperFor(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java new file mode 100644 index 000000000000..ba0c6d939e76 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -0,0 +1,84 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.util.HashMap; +import java.util.Map; + +public abstract class TSDBConstants { + + public static final String DEFAULT_PORT = "6200"; + public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!"; + public static final String INVALID_VARIABLES = "invalid variables"; + public static Map DATATYPE_MAP = null; + + public static final int JNI_SUCCESS = 0; + public static final int JNI_TDENGINE_ERROR = -1; + public static final int JNI_CONNECTION_NULL = -2; + public static final int JNI_RESULT_SET_NULL = -3; + public static final int JNI_NUM_OF_FIELDS_0 = -4; + public static final int JNI_SQL_NULL = -5; + public static final int JNI_FETCH_END = -6; + + public static final int TSDB_DATA_TYPE_NULL = 0; + public static final int TSDB_DATA_TYPE_BOOL = 1; + public static final int TSDB_DATA_TYPE_TINYINT = 2; + public static final int TSDB_DATA_TYPE_SMALLINT = 3; + public static final int TSDB_DATA_TYPE_INT = 4; + public static final int TSDB_DATA_TYPE_BIGINT = 5; + public static final int TSDB_DATA_TYPE_FLOAT = 6; + public static final int TSDB_DATA_TYPE_DOUBLE = 7; + public static final int TSDB_DATA_TYPE_BINARY = 8; + public static final int TSDB_DATA_TYPE_TIMESTAMP = 9; + public static final int TSDB_DATA_TYPE_NCHAR = 10; + + public static String WrapErrMsg(String msg) { + return "TDengine Error: " + msg; + } + + public static String FixErrMsg(int code) { + switch (code) { + case JNI_TDENGINE_ERROR: + return WrapErrMsg("internal error of database!"); + case JNI_CONNECTION_NULL: + return WrapErrMsg("invalid tdengine connection!"); + case JNI_RESULT_SET_NULL: + return WrapErrMsg("invalid resultset pointer!"); + case JNI_NUM_OF_FIELDS_0: + return WrapErrMsg("invalid num of fields!"); + case JNI_SQL_NULL: + return WrapErrMsg("can't execute empty sql!"); + case JNI_FETCH_END: + return WrapErrMsg("fetch to the end of resultset"); + default: + break; + } + return WrapErrMsg("unkown error!"); 
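+        // Codes without an explicit case above (including JNI_SUCCESS) fall through to the
+        // generic message.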
+ } + + static { + DATATYPE_MAP = new HashMap(); + DATATYPE_MAP.put(1, "BOOL"); + DATATYPE_MAP.put(2, "TINYINT"); + DATATYPE_MAP.put(3, "SMALLINT"); + DATATYPE_MAP.put(4, "INT"); + DATATYPE_MAP.put(5, "BIGINT"); + DATATYPE_MAP.put(6, "FLOAT"); + DATATYPE_MAP.put(7, "DOUBLE"); + DATATYPE_MAP.put(8, "BINARY"); + DATATYPE_MAP.put(9, "TIMESTAMP"); + DATATYPE_MAP.put(10, "NCHAR"); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java new file mode 100644 index 000000000000..6cb3263e5674 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java @@ -0,0 +1,800 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.*; +import java.util.ArrayList; +import java.util.List; + +public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { + + private String dbProductName = null; + private String url = null; + private String userName = null; + private Connection conn = null; + + public TSDBDatabaseMetaData(String dbProductName, String url, String userName) { + this.dbProductName = dbProductName; + this.url = url; + this.userName = userName; + } + + public void setConnection(Connection conn) { + this.conn = conn; + } + + public T unwrap(Class iface) throws SQLException { + return null; + } + + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } + + public boolean allProceduresAreCallable() throws SQLException { + return false; + } + + public boolean allTablesAreSelectable() throws SQLException { + return false; + } + + public String getURL() throws SQLException { + return this.url; + } + + public String getUserName() throws SQLException { + return this.userName; + } + + public boolean isReadOnly() throws SQLException { + return false; + } + + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } + + public boolean nullsAreSortedLow() throws SQLException { + return false; + } + + public boolean nullsAreSortedAtStart() throws SQLException { + return false; + } + + public boolean nullsAreSortedAtEnd() throws SQLException { + return false; + } + + public String getDatabaseProductName() throws SQLException { + return this.dbProductName; + } + + public String getDatabaseProductVersion() throws SQLException { + return "1.5.1"; + } + + public String getDriverName() throws SQLException { + return TSDBDriver.class.getName(); + } + + public String getDriverVersion() throws SQLException { + return "1.0.0"; + } + + public int getDriverMajorVersion() { + return 0; + } + + public int getDriverMinorVersion() { + return 0; + } + + public boolean usesLocalFiles() throws SQLException { + return false; + } + + public boolean usesLocalFilePerTable() throws SQLException { + return 
false; + } + + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return false; + } + + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } + + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } + + public boolean storesMixedCaseIdentifiers() throws SQLException { + return false; + } + + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + public String getIdentifierQuoteString() throws SQLException { + return " "; + } + + public String getSQLKeywords() throws SQLException { + return null; + } + + public String getNumericFunctions() throws SQLException { + return null; + } + + public String getStringFunctions() throws SQLException { + return null; + } + + public String getSystemFunctions() throws SQLException { + return null; + } + + public String getTimeDateFunctions() throws SQLException { + return null; + } + + public String getSearchStringEscape() throws SQLException { + return null; + } + + public String getExtraNameCharacters() throws SQLException { + return null; + } + + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return true; + } + + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return true; + } + + public boolean supportsColumnAliasing() throws SQLException { + return true; + } + + public boolean nullPlusNonNullIsNull() throws SQLException { + return false; + } + + public boolean supportsConvert() throws SQLException { + return false; + } + + public boolean supportsConvert(int fromType, int toType) throws SQLException { + return false; + } + + public boolean supportsTableCorrelationNames() throws SQLException { + return false; + } + + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } + + public boolean supportsExpressionsInOrderBy() throws SQLException { + return false; + } + + public boolean supportsOrderByUnrelated() throws SQLException { + return false; + } + + public boolean supportsGroupBy() throws SQLException { + return false; + } + + public boolean supportsGroupByUnrelated() throws SQLException { + return false; + } + + public boolean supportsGroupByBeyondSelect() throws SQLException { + return false; + } + + public boolean supportsLikeEscapeClause() throws SQLException { + return false; + } + + public boolean supportsMultipleResultSets() throws SQLException { + return false; + } + + public boolean supportsMultipleTransactions() throws SQLException { + return false; + } + + public boolean supportsNonNullableColumns() throws SQLException { + return false; + } + + public boolean supportsMinimumSQLGrammar() throws SQLException { + return false; + } + + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } + + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } + + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return false; + } + + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } + + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } + + public boolean supportsIntegrityEnhancementFacility() throws 
SQLException { + return false; + } + + public boolean supportsOuterJoins() throws SQLException { + return false; + } + + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } + + public boolean supportsLimitedOuterJoins() throws SQLException { + return false; + } + + public String getSchemaTerm() throws SQLException { + return null; + } + + public String getProcedureTerm() throws SQLException { + return null; + } + + public String getCatalogTerm() throws SQLException { + return "database"; + } + + public boolean isCatalogAtStart() throws SQLException { + return true; + } + + public String getCatalogSeparator() throws SQLException { + return "."; + } + + public boolean supportsSchemasInDataManipulation() throws SQLException { + return false; + } + + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return false; + } + + public boolean supportsSchemasInTableDefinitions() throws SQLException { + return false; + } + + public boolean supportsSchemasInIndexDefinitions() throws SQLException { + return false; + } + + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return false; + } + + public boolean supportsCatalogsInDataManipulation() throws SQLException { + return true; + } + + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return false; + } + + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return false; + } + + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return false; + } + + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return false; + } + + public boolean supportsPositionedDelete() throws SQLException { + return false; + } + + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } + + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } + + public boolean supportsStoredProcedures() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } + + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } + + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } + + public boolean supportsUnion() throws SQLException { + return false; + } + + public boolean supportsUnionAll() throws SQLException { + return false; + } + + public boolean supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } + + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } + + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } + + public boolean supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } + + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } + + public int getMaxCharLiteralLength() throws SQLException { + return 0; + } + + public int getMaxColumnNameLength() throws SQLException { + return 0; + } + + public int getMaxColumnsInGroupBy() throws SQLException { + return 0; + } + + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } + + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } + + public int getMaxColumnsInSelect() throws SQLException { + 
return 0; + } + + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + public int getMaxConnections() throws SQLException { + return 0; + } + + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + public int getMaxIndexLength() throws SQLException { + return 0; + } + + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + public int getMaxRowSize() throws SQLException { + return 0; + } + + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return false; + } + + public int getMaxStatementLength() throws SQLException { + return 0; + } + + public int getMaxStatements() throws SQLException { + return 0; + } + + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + public int getMaxTablesInSelect() throws SQLException { + return 0; + } + + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + public int getDefaultTransactionIsolation() throws SQLException { + return 0; + } + + public boolean supportsTransactions() throws SQLException { + return false; + } + + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return false; + } + + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return false; + } + + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) + throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, + String columnNamePattern) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) + throws SQLException { + if (conn != null && !conn.isClosed()) { + Statement stmt = conn.createStatement(); + if (catalog == null || catalog.length() < 1) { + catalog = conn.getCatalog(); + } + stmt.executeUpdate("use " + catalog); + ResultSet resultSet0 = stmt.executeQuery("show tables"); + GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types); + return getTablesResultSet; + } else { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } + } + + public ResultSet getSchemas() throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getCatalogs() throws SQLException { + + if (conn != null && !conn.isClosed()) { + Statement stmt = conn.createStatement(); + ResultSet resultSet0 = stmt.executeQuery("show databases"); + CatalogResultSet resultSet = new CatalogResultSet(resultSet0); + return resultSet; + } else { + return getEmptyResultSet(); + } + } + + public ResultSet getTableTypes() throws SQLException { + DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); + + // set up ColumnMetaDataList + List columnMetaDataList = new ArrayList(1); + ColumnMetaData 
colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(0); + colMetaData.setColName("TABLE_TYPE"); + colMetaData.setColSize(10); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + + // set up rowDataList + List rowDataList = new ArrayList(2); + TSDBResultSetRowData rowData = new TSDBResultSetRowData(); + rowData.setString(0, "TABLE"); + rowDataList.add(rowData); + rowData = new TSDBResultSetRowData(); + rowData.setString(0, "STABLE"); + rowDataList.add(rowData); + + resultSet.setColumnMetaDataList(columnMetaDataList); + resultSet.setRowDataList(rowDataList); + return resultSet; + } + + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, + String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getTypeInfo() throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) + throws SQLException { + return getEmptyResultSet(); + } + + public boolean supportsResultSetType(int type) throws SQLException { + return false; + } + + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + return false; + } + + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return false; + } + + public boolean ownDeletesAreVisible(int type) throws SQLException { + return false; + } + + public boolean ownInsertsAreVisible(int type) throws SQLException { + return false; + } + + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } + + public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } + + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } + + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + public boolean supportsBatchUpdates() throws SQLException { + return false; + } + + public ResultSet getUDTs(String catalog, String 
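An illustrative client-side sketch (not part of the committed file) of the metadata calls implemented above; the database name is a placeholder and the connection is assumed to expose this class through Connection.getMetaData().

    DatabaseMetaData meta = conn.getMetaData();
    ResultSet catalogs = meta.getCatalogs();                     // backed by "show databases"
    ResultSet tables = meta.getTables("log", null, null, null);  // issues "use log" followed by "show tables"
    while (tables.next()) {
        System.out.println(tables.getString(1));                 // first column of GetTablesResultSet, assumed to hold the table name
    }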
schemaPattern, String typeNamePattern, int[] types) + throws SQLException { + return getEmptyResultSet(); + } + + public Connection getConnection() throws SQLException { + return null; + } + + public boolean supportsSavepoints() throws SQLException { + return false; + } + + public boolean supportsNamedParameters() throws SQLException { + return false; + } + + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } + + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } + + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return false; + } + + public int getResultSetHoldability() throws SQLException { + return 0; + } + + public int getDatabaseMajorVersion() throws SQLException { + return 0; + } + + public int getDatabaseMinorVersion() throws SQLException { + return 0; + } + + public int getJDBCMajorVersion() throws SQLException { + return 0; + } + + public int getJDBCMinorVersion() throws SQLException { + return 0; + } + + public int getSQLStateType() throws SQLException { + return 0; + } + + public boolean locatorsUpdateCopy() throws SQLException { + return false; + } + + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + public RowIdLifetime getRowIdLifetime() throws SQLException { + return null; + } + + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + return null; + } + + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + public ResultSet getClientInfoProperties() throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + private ResultSet getEmptyResultSet() { + return new EmptyResultSet(); + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java new file mode 100755 index 000000000000..7881c06b6a3e --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -0,0 +1,355 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import org.apache.commons.lang3.StringUtils; + +import java.sql.*; +import java.util.Properties; +import java.util.logging.Logger; + +/** + * The Java SQL framework allows for multiple database drivers. Each driver + * should supply a class that implements the Driver interface + * + *
+ * The DriverManager will try to load as many drivers as it can find and then + * for any given connection request, it will ask each driver in turn to try to + * connect to the target URL. + * + *
+ * It is strongly recommended that each Driver class should be small and stand + * alone so that the Driver class can be loaded and queried without bringing in + * vast quantities of supporting code. + * + *
+ * When a Driver class is loaded, it should create an instance of itself and + * register it with the DriverManager. This means that a user can load and + * register a driver by doing Class.forName("foo.bah.Driver") + */ +public class TSDBDriver implements java.sql.Driver { + + @Deprecated + private static final String URL_PREFIX1 = "jdbc:TSDB://"; + + private static final String URL_PREFIX = "jdbc:TAOS://"; + + /** + * Key used to retrieve the database value from the properties instance passed + * to the driver. + */ + public static final String PROPERTY_KEY_DBNAME = "dbname"; + + /** + * Key used to retrieve the host value from the properties instance passed to + * the driver. + */ + public static final String PROPERTY_KEY_HOST = "host"; + /** + * Key used to retrieve the password value from the properties instance passed + * to the driver. + */ + public static final String PROPERTY_KEY_PASSWORD = "password"; + + /** + * Key used to retrieve the port number value from the properties instance + * passed to the driver. + */ + public static final String PROPERTY_KEY_PORT = "port"; + + /** + * Key used to retrieve the user value from the properties instance passed to + * the driver. + */ + public static final String PROPERTY_KEY_USER = "user"; + + /** + * Key for the configuration file directory of TSDB client in properties instance + */ + public static final String PROPERTY_KEY_CONFIG_DIR = "cfgdir"; + + /** + * Key for the timezone used by the TSDB client in properties instance + */ + public static final String PROPERTY_KEY_TIME_ZONE = "timezone"; + + /** + * Key for the locale used by the TSDB client in properties instance + */ + public static final String PROPERTY_KEY_LOCALE = "locale"; + + + /** + * Key for the char encoding used by the TSDB client in properties instance + */ + public static final String PROPERTY_KEY_CHARSET = "charset"; + + public static final String PROPERTY_KEY_PROTOCOL = "protocol"; + + /** + * Index for port coming out of parseHostPortPair(). + */ + public final static int PORT_NUMBER_INDEX = 1; + + /** + * Index for host coming out of parseHostPortPair(). 
+ */ + public final static int HOST_NAME_INDEX = 0; + + private TSDBDatabaseMetaData dbMetaData = null; + + static { + try { + java.sql.DriverManager.registerDriver(new TSDBDriver()); + } catch (SQLException E) { + throw new RuntimeException(TSDBConstants.WrapErrMsg("can't register tdengine jdbc driver!")); + } + } + + public Connection connect(String url, Properties info) throws SQLException { + if (url == null) { + throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!")); + } + + Properties props = null; + + if ((props = parseURL(url, info)) == null) { + return null; + } + + try { + TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET), + (String) props.get(PROPERTY_KEY_TIME_ZONE)); + Connection newConn = new TSDBConnection(props, this.dbMetaData); + return newConn; + } catch (SQLWarning sqlWarning) { + sqlWarning.printStackTrace(); + Connection newConn = new TSDBConnection(props, this.dbMetaData); + return newConn; + } catch (SQLException sqlEx) { + throw sqlEx; + } catch (Exception ex) { + SQLException sqlEx = new SQLException("SQLException:" + ex.toString()); + sqlEx.initCause(ex); + throw sqlEx; + } + } + + /** + * Parses hostPortPair in the form of [host][:port] into an array, with the + * element of index HOST_NAME_INDEX being the host (or null if not specified), + * and the element of index PORT_NUMBER_INDEX being the port (or null if not + * specified). + * + * @param hostPortPair + * host and port in form of of [host][:port] + * + * @return array containing host and port as Strings + * + * @throws SQLException + * if a parse error occurs + */ + protected static String[] parseHostPortPair(String hostPortPair) throws SQLException { + String[] splitValues = new String[2]; + + int portIndex = hostPortPair.indexOf(":"); + + String hostname = null; + + if (portIndex != -1) { + if ((portIndex + 1) < hostPortPair.length()) { + String portAsString = hostPortPair.substring(portIndex + 1); + hostname = hostPortPair.substring(0, portIndex); + + splitValues[HOST_NAME_INDEX] = hostname; + + splitValues[PORT_NUMBER_INDEX] = portAsString; + } else { + throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!")); + } + } else { + splitValues[HOST_NAME_INDEX] = hostPortPair; + splitValues[PORT_NUMBER_INDEX] = null; + } + + return splitValues; + } + + public boolean acceptsURL(String url) throws SQLException { + return true; + } + + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + if (info == null) { + info = new Properties(); + } + + if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) { + info = parseURL(url, info); + } + + DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST)); + hostProp.required = true; + + DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, + info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT)); + portProp.required = false; + + DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME)); + dbProp.required = false; + dbProp.description = "Database name"; + + DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER)); + userProp.required = true; + + DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, + info.getProperty(PROPERTY_KEY_PASSWORD)); + passwordProp.required = true; 
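A connection sketch (illustrative only): the URL follows the example documented below, while host, database, credentials and the configuration directory are placeholders; the static block above already registers the driver, so the explicit Class.forName call is optional on modern JVMs.

    Class.forName("com.taosdata.jdbc.TSDBDriver");
    Properties connProps = new Properties();
    connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");  // optional client settings
    Connection connection = DriverManager.getConnection(
            "jdbc:TAOS://localhost:0/log?user=root&password=taosdata", connProps);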
+ + DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5]; + propertyInfo[0] = hostProp; + propertyInfo[1] = portProp; + propertyInfo[2] = dbProp; + propertyInfo[3] = userProp; + propertyInfo[4] = passwordProp; + + return propertyInfo; + } + + /** + * example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password + */ + + public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException { + Properties urlProps = (defaults != null) ? defaults : new Properties(); + if (url == null) { + return null; + } + + if (!StringUtils.startsWithIgnoreCase(url, URL_PREFIX) && !StringUtils.startsWithIgnoreCase(url, URL_PREFIX1)) { + return null; + } + + String urlForMeta = url; + + String dbProductName = url.substring(url.indexOf(":") + 1); + dbProductName = dbProductName.substring(0, dbProductName.indexOf(":")); + int beginningOfSlashes = url.indexOf("//"); + url = url.substring(beginningOfSlashes + 2); + + String host = url.substring(0, url.indexOf(":")); + url = url.substring(url.indexOf(":") + 1); + urlProps.setProperty(PROPERTY_KEY_HOST, host); + + String port = url.substring(0, url.indexOf("/")); + urlProps.setProperty(PROPERTY_KEY_PORT, port); + url = url.substring(url.indexOf("/") + 1); + + if (url.indexOf("?") != -1) { + String dbName = url.substring(0, url.indexOf("?")); + urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName); + url = url.trim().substring(1); + } else { + // without user & password so return + String dbName = url.trim(); + urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName); + this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user")); + return urlProps; + } + + String[] queryStrings = url.trim().split("&"); + String user = ""; + for (String queryStr : queryStrings) { + String[] kvPair = queryStr.trim().split("="); + switch (kvPair[0].toLowerCase()) { + case PROPERTY_KEY_USER: + urlProps.setProperty(PROPERTY_KEY_USER, kvPair[1]); + user = kvPair[1]; + break; + case PROPERTY_KEY_PASSWORD: + urlProps.setProperty(PROPERTY_KEY_PASSWORD, kvPair[1]); + break; + case PROPERTY_KEY_TIME_ZONE: + urlProps.setProperty(PROPERTY_KEY_TIME_ZONE, kvPair[1]); + break; + case PROPERTY_KEY_LOCALE: + urlProps.setProperty(PROPERTY_KEY_LOCALE, kvPair[1]); + break; + case PROPERTY_KEY_CHARSET: + urlProps.setProperty(PROPERTY_KEY_CHARSET, kvPair[1]); + break; + case PROPERTY_KEY_CONFIG_DIR: + urlProps.setProperty(PROPERTY_KEY_CONFIG_DIR, kvPair[1]); + break; + } + } + + this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user); + + return urlProps; + } + + public int getMajorVersion() { + return 1; + } + + public int getMinorVersion() { + return 1; + } + + public boolean jdbcCompliant() { + return false; + } + + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + return null; + } + + /** + * Returns the host property + * + * @param props + * the java.util.Properties instance to retrieve the hostname from. + * + * @return the host + */ + public String host(Properties props) { + return props.getProperty(PROPERTY_KEY_HOST, "localhost"); + } + + /** + * Returns the port number property + * + * @param props + * the properties to get the port number from + * + * @return the port number + */ + public int port(Properties props) { + return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT)); + } + + /** + * Returns the database property from props + * + * @param props + * the Properties to look for the database property. + * + * @return the database name. 
+ */ + public String database(Properties props) { + return props.getProperty(PROPERTY_KEY_DBNAME); + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java new file mode 100644 index 000000000000..0e61755b6965 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -0,0 +1,223 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.util.HashMap; +import java.util.Map; + +/** + * + * TDengine error code and error message enumeration. + * + */ +public enum TSDBError { + + TSDB_CODE_SUCCESS(0, "success"), + TSDB_CODE_ACTION_IN_PROGRESS(1, "in progress"), + TSDB_CODE_LAST_SESSION_NOT_FINISHED(5, "last session not finished"), + TSDB_CODE_INVALID_SESSION_ID(6, "invalid session ID"), + TSDB_CODE_INVALID_TRAN_ID(7, "invalid tran ID"), + TSDB_CODE_INVALID_MSG_TYPE(8, "invalid msg type"), + TSDB_CODE_ALREADY_PROCESSED(9, "already processed"), + TSDB_CODE_AUTH_FAILURE(10, "authentication failure"), + TSDB_CODE_WRONG_MSG_SIZE(11, "wrong msg size"), + TSDB_CODE_UNEXPECTED_RESPONSE(12, "unexpected response"), + TSDB_CODE_INVALID_RESPONSE_TYPE(13, "invalid response type"), + TSDB_CODE_NO_RESOURCE(14, "no resource"), + TSDB_CODE_INVALID_TIME_STAMP(15, "invalid time stamp"), + TSDB_CODE_MISMATCHED_METER_ID(16, "mismatched meter ID"), + TSDB_CODE_ACTION_TRANS_NOT_FINISHED(17, "transaction not finished"), + TSDB_CODE_ACTION_NOT_ONLINE(18, "not online"), + TSDB_CODE_ACTION_SEND_FAILD(19, "send failed"), + TSDB_CODE_NOT_ACTIVE_SESSION(20, "not active session"), + TSDB_CODE_INSERT_FAILED(21, "insert failed"), + TSDB_CODE_APP_ERROR(22, "App error"), + TSDB_CODE_INVALID_IE(23, "invalid IE"), + TSDB_CODE_INVALID_VALUE(24, "invalid value"), + TSDB_CODE_REDIRECT(25, "service not available"), + TSDB_CODE_ALREADY_THERE(26, "already there"), + TSDB_CODE_INVALID_METER_ID(27, "invalid meter ID"), + TSDB_CODE_INVALID_SQL(28, "invalid SQL"), // this message often comes with additional info which will vary based on the specific error situation + TSDB_CODE_NETWORK_UNAVAIL(29, "failed to connect to server"), + TSDB_CODE_INVALID_MSG_LEN(30, "invalid msg len"), + TSDB_CODE_INVALID_DB(31, "invalid DB"), + TSDB_CODE_INVALID_TABLE(32, "invalid table"), + TSDB_CODE_DB_ALREADY_EXIST(33, "DB already there"), + TSDB_CODE_TABLE_ALREADY_EXIST(34, "table already there"), + TSDB_CODE_INVALID_USER(35, "invalid user name"), + TSDB_CODE_INVALID_ACCT(36, "invalid acct name"), + TSDB_CODE_INVALID_PASS(37, "invalid password"), + TSDB_CODE_DB_NOT_SELECTED(38, "DB not selected"), + TSDB_CODE_MEMORY_CORRUPTED(39, "memory corrupted"), + TSDB_CODE_USER_ALREADY_EXIST(40, "user name exists"), + TSDB_CODE_NO_RIGHTS(41, "not authorized"), + TSDB_CODE_DISCONNECTED(42, "login 
disconnected, login again"), + TSDB_CODE_NO_MASTER(43, "mgmt master node not available"), + TSDB_CODE_NOT_CONFIGURED(44, "not configured"), + TSDB_CODE_INVALID_OPTION(45, "invalid option"), + TSDB_CODE_NODE_OFFLINE(46, "node offline"), + TSDB_CODE_SYNC_REQUIRED(47, "sync required"), + TSDB_CODE_NO_ENOUGH_PNODES(48, "more dnodes are needed"), + TSDB_CODE_UNSYNCED(49, "node in unsynced state"), + TSDB_CODE_TOO_SLOW(50, "too slow"), + TSDB_CODE_OTHERS(51, "others"), + TSDB_CODE_NO_REMOVE_MASTER(52, "can't remove dnode which is master"), + TSDB_CODE_WRONG_SCHEMA(53, "wrong schema"), + TSDB_CODE_NO_RESULT(54, "no results"), + TSDB_CODE_TOO_MANY_USERS(55, "num of users exceeds maxUsers"), + TSDB_CODE_TOO_MANY_DATABSES(56, "num of databases exceeds maxDbs"), + TSDB_CODE_TOO_MANY_TABLES(57, "num of tables exceeds maxTables"), + TSDB_CODE_TOO_MANY_DNODES(58, "num of dnodes exceeds maxDnodes"), + TSDB_CODE_TOO_MANY_ACCTS(59, "num of accounts exceeds maxAccts"), + TSDB_CODE_ACCT_ALREADY_EXIST(60, "account name exists"), + TSDB_CODE_DNODE_ALREADY_EXIST(61, "dnode ip exists"), + TSDB_CODE_SDB_ERROR(62, "sdb error"), + TSDB_CODE_METRICMETA_EXPIRED(63, "metric meta expired"), // local cached metric-meta expired causes error in metric query + TSDB_CODE_NOT_READY(64, "not ready"), // peer is not ready to process data + TSDB_CODE_MAX_SESSIONS(65, "too many sessions on server"), // too many sessions + TSDB_CODE_MAX_CONNECTIONS(66, "too many sessions from app"), // too many connections + TSDB_CODE_SESSION_ALREADY_EXIST(67, "session to dest is already there"), + TSDB_CODE_NO_QSUMMARY(68, "query list not there, please show again"), + TSDB_CODE_SERV_OUT_OF_MEMORY(69, "server out of memory"), + TSDB_CODE_INVALID_QHANDLE(70, "invalid query handle"), + TSDB_CODE_RELATED_TABLES_EXIST(71, "tables related to metric exist"), + TSDB_CODE_MONITOR_DB_FORBEIDDEN(72, "can't drop monitor database or tables"), + TSDB_CODE_VG_COMMITLOG_INIT_FAILED(73, "commit log init failed"), + TSDB_CODE_VG_INIT_FAILED(74, "vgroup init failed"), + TSDB_CODE_DATA_ALREADY_IMPORTED(75, "data is already imported"), + TSDB_CODE_OPS_NOT_SUPPORT(76, "not supported operation"), + TSDB_CODE_INVALID_QUERY_ID(77, "invalid query id string"), + TSDB_CODE_INVALID_STREAM_ID(78, "invalid stream id string"), + TSDB_CODE_INVALID_CONNECTION(79, "invalid connection string"), + TSDB_CODE_ACTION_NOT_BALANCED(80, "dnode not balanced"), + TSDB_CODE_CLI_OUT_OF_MEMORY(81, "client out of memory"), + TSDB_CODE_DATA_OVERFLOW(82, "data value overflow"), + TSDB_CODE_QUERY_CANCELLED(83, "query cancelled"), + TSDB_CODE_GRANT_POINT_LIMITED(84, "grant points limited"), + TSDB_CODE_GRANT_EXPIRED(85, "grant expired"), + TSDB_CODE_CLI_NO_DISKSPACE(86, "client no disk space"), + TSDB_CODE_FILE_CORRUPTED(87, "DB file corrupted"), + TSDB_CODE_INVALID_CLIENT_VERSION(88, "version of client and server not match"); + + private long errCode; + private String errMessage; + private static Map errorCodeMap = new HashMap<>(86); + static { + errorCodeMap.put(0, "success"); + errorCodeMap.put(1, "in progress"); + errorCodeMap.put(5, "last session not finished"); + errorCodeMap.put(6, "invalid session ID"); + errorCodeMap.put(7, "invalid tran ID"); + errorCodeMap.put(8, "invalid msg type"); + errorCodeMap.put(9, "already processed"); + errorCodeMap.put(10, "authentication failure"); + errorCodeMap.put(11, "wrong msg size"); + errorCodeMap.put(12, "unexpected response"); + errorCodeMap.put(13, "invalid response type"); + errorCodeMap.put(14, "no resource"); + errorCodeMap.put(15, "invalid time 
stamp"); + errorCodeMap.put(16, "mismatched meter ID"); + errorCodeMap.put(17, "transaction not finished"); + errorCodeMap.put(18, "not online"); + errorCodeMap.put(19, "send failed"); + errorCodeMap.put(20, "not active session"); + errorCodeMap.put(21, "insert failed"); + errorCodeMap.put(22, "App error"); + errorCodeMap.put(23, "invalid IE"); + errorCodeMap.put(24, "invalid value"); + errorCodeMap.put(25, "service not available"); + errorCodeMap.put(26, "already there"); + errorCodeMap.put(27, "invalid meter ID"); + errorCodeMap.put(28, "invalid SQL"); // this message often comes with additional info which will vary based on the specific error situation + errorCodeMap.put(29, "failed to connect to server"); + errorCodeMap.put(30, "invalid msg len"); + errorCodeMap.put(31, "invalid DB"); + errorCodeMap.put(32, "invalid table"); + errorCodeMap.put(33, "DB already there"); + errorCodeMap.put(34, "table already there"); + errorCodeMap.put(35, "invalid user name"); + errorCodeMap.put(36, "invalid acct name"); + errorCodeMap.put(37, "invalid password"); + errorCodeMap.put(38, "DB not selected"); + errorCodeMap.put(39, "memory corrupted"); + errorCodeMap.put(40, "user name exists"); + errorCodeMap.put(41, "not authorized"); + errorCodeMap.put(42, "login disconnected, login again"); + errorCodeMap.put(43, "mgmt master node not available"); + errorCodeMap.put(44, "not configured"); + errorCodeMap.put(45, "invalid option"); + errorCodeMap.put(46, "node offline"); + errorCodeMap.put(47, "sync required"); + errorCodeMap.put(48, "more dnodes are needed"); + errorCodeMap.put(49, "node in unsynced state"); + errorCodeMap.put(50, "too slow"); + errorCodeMap.put(51, "others"); + errorCodeMap.put(52, "can't remove dnode which is master"); + errorCodeMap.put(53, "wrong schema"); + errorCodeMap.put(54, "no results"); + errorCodeMap.put(55, "num of users exceeds maxUsers"); + errorCodeMap.put(56, "num of databases exceeds maxDbs"); + errorCodeMap.put(57, "num of tables exceeds maxTables"); + errorCodeMap.put(58, "num of dnodes exceeds maxDnodes"); + errorCodeMap.put(59, "num of accounts exceeds maxAccts"); + errorCodeMap.put(60, "account name exists"); + errorCodeMap.put(61, "dnode ip exists"); + errorCodeMap.put(62, "sdb error"); + errorCodeMap.put(63, "metric meta expired"); // local cached metric-meta expired causes error in metric query + errorCodeMap.put(64, "not ready"); // peer is not ready to process data + errorCodeMap.put(65, "too many sessions on server"); // too many sessions + errorCodeMap.put(66, "too many sessions from app"); // too many connections + errorCodeMap.put(67, "session to dest is already there"); + errorCodeMap.put(68, "query list not there, please show again"); + errorCodeMap.put(69, "server out of memory"); + errorCodeMap.put(70, "invalid query handle"); + errorCodeMap.put(71, "tables related to metric exist"); + errorCodeMap.put(72, "can't drop monitor database or tables"); + errorCodeMap.put(73, "commit log init failed"); + errorCodeMap.put(74, "vgroup init failed"); + errorCodeMap.put(75, "data is already imported"); + errorCodeMap.put(76, "not supported operation"); + errorCodeMap.put(77, "invalid query id string"); + errorCodeMap.put(78, "invalid stream id string"); + errorCodeMap.put(79, "invalid connection string"); + errorCodeMap.put(80, "dnode not balanced"); + errorCodeMap.put(81, "client out of memory"); + errorCodeMap.put(82, "data value overflow"); + errorCodeMap.put(83, "query cancelled"); + errorCodeMap.put(84, "grant points limited"); + errorCodeMap.put(85, 
"grant expired"); + errorCodeMap.put(86, "client no disk space"); + errorCodeMap.put(87, "DB file corrupted"); + errorCodeMap.put(88, "version of client and server not match"); + } + + TSDBError(long code, String message) { + this.errCode = code; + this.errMessage = message; + } + + public long getErrCode() { + return this.errCode; + } + + public String getErrMessage() { + return this.errMessage; + } + + public static String getErrMessageByCode(long errCode) { + return errorCodeMap.get(errCode); + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java new file mode 100755 index 000000000000..b1c38914241f --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -0,0 +1,269 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.List; + +public class TSDBJNIConnector { + static final long INVALID_CONNECTION_POINTER_VALUE = 0l; + static volatile Boolean isInitialized = false; + + static { + System.loadLibrary("taos"); + } + + /** + * Connection pointer used in C + */ + private long taos = INVALID_CONNECTION_POINTER_VALUE; + + /** + * result set status in current connection + */ + private boolean isResultsetClosed = true; + private int affectedRows = -1; + + /** + * Whether the connection is closed + */ + public boolean isClosed() { + return this.taos == INVALID_CONNECTION_POINTER_VALUE; + } + + /** + * Returns the status of last result set in current connection + * @return + */ + public boolean isResultsetClosed() { + return this.isResultsetClosed; + } + + /** + * Initialize static variables in JNI to optimize performance + */ + public static void init(String configDir, String locale, String charset, String timezone) throws SQLWarning { + synchronized(isInitialized) { + if (!isInitialized) { + initImp(configDir); + if (setOptions(0, locale) < 0) { + throw new SQLWarning(TSDBConstants.WrapErrMsg("Failed to set locale: " + locale + ". System default will be used.")); + } + if (setOptions(1, charset) < 0) { + throw new SQLWarning(TSDBConstants.WrapErrMsg("Failed to set charset: " + charset + ". System default will be used.")); + } + if (setOptions(2, timezone) < 0) { + throw new SQLWarning(TSDBConstants.WrapErrMsg("Failed to set timezone: " + timezone + ". 
System default will be used.")); + } + isInitialized = true; + TaosGlobalConfig.setCharset(getTsCharset()); + } + } + } + + public static native void initImp(String configDir); + + public static native int setOptions(int optionIndex, String optionValue); + + public static native String getTsCharset(); + + /** + * Get connection pointer + * + * @throws SQLException + */ + public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException { + if (this.taos != INVALID_CONNECTION_POINTER_VALUE) { + this.closeConnectionImp(this.taos); + this.taos = INVALID_CONNECTION_POINTER_VALUE; + } + + this.taos = this.connectImp(host, port, dbName, user, password); + if (this.taos == INVALID_CONNECTION_POINTER_VALUE) { + throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg()), "", this.getErrCode()); + } + + return true; + } + + private native long connectImp(String host, int port, String dbName, String user, String password); + + /** + * Execute DML/DDL operation + * + * @throws SQLException + */ + public int executeQuery(String sql) throws SQLException { + if (!this.isResultsetClosed) { + //throw new RuntimeException(TSDBConstants.WrapErrMsg("Connection already has an open result set")); + long resultSetPointer = this.getResultSet(); + if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + //do nothing + } else { + this.freeResultSet(resultSetPointer); + } + } + + int code; + try { + code = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos); + } catch (Exception e) { + e.printStackTrace(); + throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding")); + } + affectedRows = code; + if (code < 0) { + affectedRows = -1; + if (code == TSDBConstants.JNI_TDENGINE_ERROR) { + throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg()), "", this.getErrCode()); + } else { + throw new SQLException(TSDBConstants.FixErrMsg(code), "", this.getErrCode()); + } + } + + return code; + } + + private native int executeQueryImp(byte[] sqlBytes, long connection); + + /** + * Get recent error code by connection + */ + public int getErrCode() { + return Math.abs(this.getErrCodeImp(this.taos)); + } + + private native int getErrCodeImp(long connection); + + /** + * Get recent error message by connection + */ + public String getErrMsg() { + return this.getErrMsgImp(this.taos); + } + + private native String getErrMsgImp(long connection); + + /** + * Get resultset pointer + * Each connection should have a single open result set at a time + */ + public long getResultSet() { + long res = this.getResultSetImp(this.taos); + return res; + } + + private native long getResultSetImp(long connection); + + /** + * Free resultset operation from C to release resultset pointer by JNI + */ + public int freeResultSet(long result) { + int res = this.freeResultSetImp(this.taos, result); + this.isResultsetClosed = true; // reset resultSetPointer to 0 after freeResultSetImp() return + return res; + } + + private native int freeResultSetImp(long connection, long result); + + /** + * Get affected rows count + */ + public int getAffectedRows() { + int affectedRows = this.affectedRows; + if (affectedRows < 0) { + affectedRows = this.getAffectedRowsImp(this.taos); + } + return affectedRows; + } + + private native int getAffectedRowsImp(long connection); + + /** + * Get schema metadata + */ + public int getSchemaMetaData(long resultSet, List columnMetaData) { + return this.getSchemaMetaDataImp(this.taos, resultSet, columnMetaData); + } + + private 
native int getSchemaMetaDataImp(long connection, long resultSet, List columnMetaData); + + /** + * Get one row data + */ + public int fetchRow(long resultSet, TSDBResultSetRowData rowData) { + return this.fetchRowImp(this.taos, resultSet, rowData); + } + + private native int fetchRowImp(long connection, long resultSet, TSDBResultSetRowData rowData); + + /** + * Execute close operation from C to release connection pointer by JNI + * + * @throws SQLException + */ + public void closeConnection() throws SQLException { + int code = this.closeConnectionImp(this.taos); + if (code < 0) { + throw new SQLException(TSDBConstants.FixErrMsg(code), "", this.getErrCode()); + } else if (code == 0){ + this.taos = INVALID_CONNECTION_POINTER_VALUE; + } else { + throw new SQLException("Undefined error code returned by TDengine when closing a connection"); + } + } + + private native int closeConnectionImp(long connection); + + /** + * Subscribe to a table in TSDB + */ + public long subscribe(String host, String user, String password, String database, String table, long time, int period){ + return subscribeImp(host, user, password, database, table, time, period); + } + + private native long subscribeImp(String host, String user, String password, String database, String table, long time, int period); + + /** + * Consume a subscribed table + */ + public TSDBResultSetRowData consume(long subscription) { + return this.consumeImp(subscription); + } + + private native TSDBResultSetRowData consumeImp(long subscription); + + /** + * Unsubscribe a table + * @param subscription + */ + public void unsubscribe(long subscription) { + unsubscribeImp(subscription); + } + + private native void unsubscribeImp(long subscription); + + /** + * Validate if a create table sql statement is correct without actually creating that table + */ + public boolean validateCreateTableSql(String sql) { + long connection = taos; + int res = validateCreateTableSqlImp(connection, sql.getBytes()); + return res != 0 ? false : true; + } + + private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes); +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java new file mode 100644 index 000000000000..d9227523d4ac --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java @@ -0,0 +1,75 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
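A lifecycle sketch (illustrative only) of how the higher-level statement and result set classes drive this connector; connection parameters and the query are placeholders, error handling is elided, and TSDBJNIConnector.init(...) is normally invoked first by TSDBDriver.connect().

    TSDBJNIConnector connector = new TSDBJNIConnector();
    connector.connect("localhost", 0, "log", "root", "taosdata");
    connector.executeQuery("select * from weather limit 10");
    long resultSetPointer = connector.getResultSet();
    // fetch rows with getSchemaMetaData() / fetchRow(), then release the native handles
    connector.freeResultSet(resultSetPointer);
    connector.closeConnection();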
+ *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.ParameterMetaData; +import java.sql.SQLException; + +public class TSDBParameterMetaData implements ParameterMetaData { + @Override + public int getParameterCount() throws SQLException { + return 0; + } + + @Override + public int isNullable(int param) throws SQLException { + return 0; + } + + @Override + public boolean isSigned(int param) throws SQLException { + return false; + } + + @Override + public int getPrecision(int param) throws SQLException { + return 0; + } + + @Override + public int getScale(int param) throws SQLException { + return 0; + } + + @Override + public int getParameterType(int param) throws SQLException { + return 0; + } + + @Override + public String getParameterTypeName(int param) throws SQLException { + return null; + } + + @Override + public String getParameterClassName(int param) throws SQLException { + return null; + } + + @Override + public int getParameterMode(int param) throws SQLException { + return 0; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java new file mode 100644 index 000000000000..d613c252f03a --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -0,0 +1,412 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/* + * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * standard JDBC API contains more or less some adjustments customized for certain + * compatibility needs. + */ +public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { + protected String rawSql; + protected String sql; + protected ArrayList parameters = new ArrayList(); + + TSDBPreparedStatement(TSDBJNIConnector connecter, String sql) { + super(connecter); + this.rawSql = sql; + preprocessSql(); + } + + public ArrayList getParameters() { + return parameters; + } + + public void setParameters(ArrayList parameters) { + this.parameters = parameters; + } + + public String getRawSql() { + return rawSql; + } + + /* + * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by + * the TDengine client. 
Thus, some simple parsers/filters are intentionally added in this JDBC implementation in + * order to process those supported SQLs. + */ + private void preprocessSql() { + + /***** For processing some of Spark SQLs*****/ + // should replace it first + this.rawSql = this.rawSql.replaceAll("or (.*) is null", ""); + this.rawSql = this.rawSql.replaceAll(" where ", " WHERE "); + this.rawSql = this.rawSql.replaceAll(" or ", " OR "); + this.rawSql = this.rawSql.replaceAll(" and ", " AND "); + this.rawSql = this.rawSql.replaceAll(" is null", " IS NULL"); + this.rawSql = this.rawSql.replaceAll(" is not null", " IS NOT NULL"); + + // SELECT * FROM db.tb WHERE 1=0 + this.rawSql = this.rawSql.replaceAll("WHERE 1=0", "WHERE _c0=1"); + this.rawSql = this.rawSql.replaceAll("WHERE 1=2", "WHERE _c0=1"); + + // SELECT "ts","val" FROM db.tb + this.rawSql = this.rawSql.replaceAll("\"", ""); + + // SELECT 1 FROM db.tb + this.rawSql = this.rawSql.replaceAll("SELECT 1 FROM", "SELECT * FROM"); + + // SELECT "ts","val" FROM db.tb WHERE ts < 33 or ts is null + this.rawSql = this.rawSql.replaceAll("OR (.*) IS NULL", ""); + + // SELECT "ts","val" FROM db.tb WHERE ts is null or ts < 33 + this.rawSql = this.rawSql.replaceAll("(.*) IS NULL OR", ""); + + // SELECT 1 FROM db.tb WHERE (("val" IS NOT NULL) AND ("val" > 50)) AND (ts >= 66) + this.rawSql = this.rawSql.replaceAll("\\(\\((.*) IS NOT NULL\\) AND", "("); + + // SELECT 1 FROM db.tb WHERE ("val" IS NOT NULL) AND ("val" > 50) AND (ts >= 66) + this.rawSql = this.rawSql.replaceAll("\\((.*) IS NOT NULL\\) AND", ""); + + // SELECT "ts","val" FROM db.tb WHERE (("val" IS NOT NULL)) AND (ts < 33 or ts is null) + this.rawSql = this.rawSql.replaceAll("\\(\\((.*) IS NOT NULL\\)\\) AND", ""); + + /***** For processing inner subqueries *****/ + Pattern pattern = Pattern.compile("FROM\\s+((\\(.+\\))\\s+SUB_QRY)", Pattern.CASE_INSENSITIVE); + Matcher matcher = pattern.matcher(rawSql); + String tableFullName = ""; + if (matcher.find() && matcher.groupCount() == 2) { + String subQry = matcher.group(2); + Pattern pattern1 = Pattern.compile("FROM\\s+(\\w+\\.\\w+)", Pattern.CASE_INSENSITIVE); + Matcher matcher1 = pattern1.matcher(subQry); + if (matcher1.find() && matcher1.groupCount() == 1) { + tableFullName = matcher1.group(1); + } + rawSql = rawSql.replace(matcher.group(1), tableFullName); + } + /***** for inner queries *****/ + + } + + /** + * Populate parameters into prepared sql statements + * @return a string of the native sql statement for TSDB + */ + private String getNativeSql() { + this.sql = this.rawSql; + for (int i = 0; i < parameters.size(); ++i) { + Object para = parameters.get(i); + if (para != null) { + String paraStr = para.toString(); + if (para instanceof Timestamp || para instanceof String) { + paraStr = "'" + paraStr + "'"; + } + this.sql = this.sql.replaceFirst("[?]", paraStr); + } else { + this.sql = this.sql.replaceFirst("[?]", "NULL"); + } + } + parameters.clear(); + return sql; + } + + @Override + public ResultSet executeQuery() throws SQLException { + return super.executeQuery(getNativeSql()); + } + + @Override + public int executeUpdate() throws SQLException { + return super.executeUpdate(getNativeSql()); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + setObject(parameterIndex, new String("NULL")); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setByte(int parameterIndex, byte x) throws 
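A binding sketch (illustrative only, not part of the committed file): parameters are spliced into the SQL text by getNativeSql() in the order they are set, with strings and timestamps wrapped in single quotes; the table name and values are placeholders, and the connection's prepareStatement() is assumed to return this implementation.

    PreparedStatement ps = connection.prepareStatement("insert into log.weather values (?, ?)");
    ps.setTimestamp(1, Timestamp.valueOf("2019-07-01 00:00:00"));  // rendered as a quoted literal
    ps.setFloat(2, 23.5f);                                         // rendered as-is
    ps.executeUpdate();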
SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + setObject(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void clearParameters() throws SQLException { + parameters.clear(); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + parameters.add(x); + } + + @Override + public boolean execute() throws SQLException { + return executeUpdate(getNativeSql()) == 0; + } + + @Override + public void addBatch() throws SQLException { + if (this.batchedArgs == null) { + batchedArgs = new ArrayList(); + } + super.addBatch(getNativeSql()); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public 
ResultSetMetaData getMetaData() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java new file mode 100644 index 000000000000..8acf77975630 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -0,0 +1,1033 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ *****************************************************************************/ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +@SuppressWarnings("unused") +public class TSDBResultSet implements ResultSet { + private TSDBJNIConnector jniConnector = null; + + private long resultSetPointer = 0L; + private List columnMetaDataList = new ArrayList(); + + private TSDBResultSetRowData rowData; + + private boolean lastWasNull = false; + private final int COLUMN_INDEX_START_VALUE = 1; + + public TSDBJNIConnector getJniConnector() { + return jniConnector; + } + + public void setJniConnector(TSDBJNIConnector jniConnector) { + this.jniConnector = jniConnector; + } + + public long getResultSetPointer() { + return resultSetPointer; + } + + public void setResultSetPointer(long resultSetPointer) { + this.resultSetPointer = resultSetPointer; + } + + public List getColumnMetaDataList() { + return columnMetaDataList; + } + + public void setColumnMetaDataList(List columnMetaDataList) { + this.columnMetaDataList = columnMetaDataList; + } + + public TSDBResultSetRowData getRowData() { + return rowData; + } + + public void setRowData(TSDBResultSetRowData rowData) { + this.rowData = rowData; + } + + public boolean isLastWasNull() { + return lastWasNull; + } + + public void setLastWasNull(boolean lastWasNull) { + this.lastWasNull = lastWasNull; + } + + public TSDBResultSet() { + } + + public TSDBResultSet(TSDBJNIConnector connecter, long resultSetPointer) throws SQLException { + this.jniConnector = connecter; + this.resultSetPointer = resultSetPointer; + int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList); + if (code == TSDBConstants.JNI_CONNECTION_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); + } else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); + } + + this.rowData = new TSDBResultSetRowData(this.columnMetaDataList.size()); + } + + public T unwrap(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isWrapperFor(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean next() throws SQLException { + if (rowData != null) { + this.rowData.clear(); + } + + int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData); + if (code == TSDBConstants.JNI_CONNECTION_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); + } else if (code == 
TSDBConstants.JNI_NUM_OF_FIELDS_0) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0)); + } else if (code == TSDBConstants.JNI_FETCH_END) { + return false; + } else { + return true; + } + } + + public void close() throws SQLException { + if (this.jniConnector != null) { + int code = this.jniConnector.freeResultSet(this.resultSetPointer); + if (code == TSDBConstants.JNI_CONNECTION_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } else if (code == TSDBConstants.JNI_RESULT_SET_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL)); + } + } + } + + public boolean wasNull() throws SQLException { + return this.lastWasNull; + } + + public String getString(int columnIndex) throws SQLException { + String res = null; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + public boolean getBoolean(int columnIndex) throws SQLException { + boolean res = false; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + public byte getByte(int columnIndex) throws SQLException { + byte res = 0; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + public short getShort(int columnIndex) throws SQLException { + short res = 0; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + public int getInt(int columnIndex) throws SQLException { + int res = 0; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + public long getLong(int columnIndex) throws SQLException { + long res = 0l; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + public float getFloat(int columnIndex) throws SQLException { + float res = 0; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + public double getDouble(int columnIndex) throws SQLException { + double res = 0; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + } + return res; + } + + /* + * (non-Javadoc) + * + * @see java.sql.ResultSet#getBigDecimal(int, int) + * + * @deprecated Use {@code getBigDecimal(int columnIndex)} or {@code + * getBigDecimal(String columnLabel)} 
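+ * (for example {@code BigDecimal v = getBigDecimal(1)}); in this implementation the scale
+ * argument is ignored and the value is constructed from the column value converted to a long.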
+ */ + @Deprecated + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + BigDecimal res = null; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType())); + } + return res; + } + + public byte[] getBytes(int columnIndex) throws SQLException { + byte[] res = null; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()).getBytes(); + } + return res; + } + + public Date getDate(int columnIndex) throws SQLException { + int colIndex = getTrueColumnIndex(columnIndex); + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Time getTime(int columnIndex) throws SQLException { + int colIndex = getTrueColumnIndex(columnIndex); + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Timestamp getTimestamp(int columnIndex) throws SQLException { + Timestamp res = null; + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + if (!lastWasNull) { + res = this.rowData.getTimestamp(colIndex); + } + return res; + } + + public InputStream getAsciiStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + /* + * (non-Javadoc) + * + * @see java.sql.ResultSet#getUnicodeStream(int) + * + * * @deprecated use getCharacterStream in place of + * getUnicodeStream + */ + @Deprecated + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public InputStream getBinaryStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public String getString(String columnLabel) throws SQLException { + return this.getString(this.findColumn(columnLabel)); + } + + public boolean getBoolean(String columnLabel) throws SQLException { + return this.getBoolean(this.findColumn(columnLabel)); + } + + public byte getByte(String columnLabel) throws SQLException { + return this.getByte(this.findColumn(columnLabel)); + } + + public short getShort(String columnLabel) throws SQLException { + return this.getShort(this.findColumn(columnLabel)); + } + + public int getInt(String columnLabel) throws SQLException { + return this.getInt(this.findColumn(columnLabel)); + } + + public long getLong(String columnLabel) throws SQLException { + return this.getLong(this.findColumn(columnLabel)); + } + + public float getFloat(String columnLabel) throws SQLException { + return this.getFloat(this.findColumn(columnLabel)); + } + + public double getDouble(String columnLabel) throws SQLException { + return this.getDouble(this.findColumn(columnLabel)); + } + + /* + * used by spark + */ + @Deprecated + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + return this.getBigDecimal(this.findColumn(columnLabel), scale); + } + + public byte[] getBytes(String columnLabel) throws SQLException { + return this.getBytes(this.findColumn(columnLabel)); + } + + public Date getDate(String columnLabel) throws SQLException { + return this.getDate(this.findColumn(columnLabel)); + } + + public Time getTime(String columnLabel) throws 
SQLException { + return this.getTime(this.findColumn(columnLabel)); + } + + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return this.getTimestamp(this.findColumn(columnLabel)); + } + + public InputStream getAsciiStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Deprecated + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public InputStream getBinaryStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public SQLWarning getWarnings() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void clearWarnings() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public String getCursorName() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSetMetaData getMetaData() throws SQLException { + return new TSDBResultSetMetaData(this.columnMetaDataList); + } + + public Object getObject(int columnIndex) throws SQLException { + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + return this.rowData.get(colIndex); + } + + public Object getObject(String columnLabel) throws SQLException { + return this.getObject(this.findColumn(columnLabel)); + } + + public int findColumn(String columnLabel) throws SQLException { + Iterator colMetaDataIt = this.columnMetaDataList.iterator(); + while (colMetaDataIt.hasNext()) { + ColumnMetaData colMetaData = colMetaDataIt.next(); + if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) { + return colMetaData.getColIndex() + 1; + } + } + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + } + + public Reader getCharacterStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Reader getCharacterStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + /* + * used by spark + */ + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + int colIndex = getTrueColumnIndex(columnIndex); + + this.lastWasNull = this.rowData.wasNull(colIndex); + return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType())); + } + + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + return this.getBigDecimal(this.findColumn(columnLabel)); + } + + public boolean isBeforeFirst() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isAfterLast() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isFirst() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isLast() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void beforeFirst() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void afterLast() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + 
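+    /*
+     * Minimal usage sketch (the connection/statement, "conn", "stmt" and the table name are
+     * illustrative only, obtained elsewhere): this result set is forward-only, so rows are
+     * consumed once with next(), columns are read by 1-based index or by label, and wasNull()
+     * reports whether the most recently read column was NULL:
+     *
+     *   Statement stmt = conn.createStatement();
+     *   ResultSet rs = stmt.executeQuery("SELECT ts, val FROM db.tb");
+     *   while (rs.next()) {
+     *     Timestamp ts = rs.getTimestamp(1);                // 1-based index, mapped internally
+     *     double val = rs.getDouble(rs.findColumn("val"));  // labels are matched case-insensitively
+     *     boolean missing = rs.wasNull();                   // refers to the last column read
+     *   }
+     *   rs.close();                                         // releases the native result set
+     */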
public boolean first() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean last() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean absolute(int row) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean relative(int rows) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean previous() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setFetchDirection(int direction) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getFetchDirection() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setFetchSize(int rows) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getFetchSize() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getType() throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY; + } + + public int getConcurrency() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean rowUpdated() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean rowInserted() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean rowDeleted() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNull(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateByte(int columnIndex, byte x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateShort(int columnIndex, short x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateInt(int columnIndex, int x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateLong(int columnIndex, long x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateFloat(int columnIndex, float x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateDouble(int columnIndex, double x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateString(int columnIndex, String x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateDate(int columnIndex, Date x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateTime(int columnIndex, Time x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateObject(int columnIndex, Object x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNull(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateByte(String columnLabel, byte x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateShort(String columnLabel, short x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateInt(String columnLabel, int x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateLong(String columnLabel, long x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateFloat(String columnLabel, float x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateDouble(String columnLabel, double x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateString(String columnLabel, String x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateDate(String columnLabel, Date x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateTime(String columnLabel, Time x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateObject(String columnLabel, Object x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void insertRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void deleteRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void refreshRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void cancelRowUpdates() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void moveToInsertRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void moveToCurrentRow() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Statement getStatement() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Object getObject(int columnIndex, Map> map) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Ref getRef(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Blob getBlob(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Clob getClob(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Array getArray(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Object getObject(String columnLabel, Map> map) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Ref getRef(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Blob getBlob(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Clob getClob(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Array getArray(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public URL getURL(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public URL getURL(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateRef(int columnIndex, Ref x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateRef(String columnLabel, Ref x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBlob(int columnIndex, Blob x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBlob(String columnLabel, Blob x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateClob(int columnIndex, Clob x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateClob(String columnLabel, Clob x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateArray(int columnIndex, Array x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateArray(String columnLabel, Array x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public RowId getRowId(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public RowId getRowId(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateRowId(int columnIndex, RowId x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateRowId(String columnLabel, RowId x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getHoldability() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isClosed() throws SQLException { + boolean isClosed = true; + if (jniConnector != null) { + isClosed = jniConnector.isResultsetClosed(); + } + return isClosed; + } + + public void updateNString(int columnIndex, String nString) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNString(String columnLabel, String nString) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public NClob getNClob(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public NClob getNClob(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public SQLXML getSQLXML(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public SQLXML getSQLXML(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public String getNString(int columnIndex) throws SQLException { + int colIndex = getTrueColumnIndex(columnIndex); + return (String) rowData.get(colIndex); + } + + public String getNString(String columnLabel) throws SQLException { + return (String) this.getString(columnLabel); + } + + public Reader getNCharacterStream(int columnIndex) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public Reader getNCharacterStream(String columnLabel) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBlob(String 
columnLabel, InputStream inputStream, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public T getObject(int columnIndex, Class type) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public T getObject(String columnLabel, Class type) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + private int getTrueColumnIndex(int columnIndex) throws SQLException { + if 
(columnIndex < this.COLUMN_INDEX_START_VALUE) { + throw new SQLException("Column Index out of range, " + columnIndex + " < " + this.COLUMN_INDEX_START_VALUE); + } + + int numOfCols = this.columnMetaDataList.size(); + if (columnIndex > numOfCols) { + throw new SQLException("Column Index out of range, " + columnIndex + " > " + numOfCols); + } + + return columnIndex - 1; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java new file mode 100644 index 000000000000..d6d69bd8b02d --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java @@ -0,0 +1,213 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.List; + +public class TSDBResultSetMetaData implements ResultSetMetaData { + + List colMetaDataList = null; + + public TSDBResultSetMetaData(List metaDataList) { + this.colMetaDataList = metaDataList; + } + + public T unwrap(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isWrapperFor(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getColumnCount() throws SQLException { + return colMetaDataList.size(); + } + + public boolean isAutoIncrement(int column) throws SQLException { + return false; + } + + public boolean isCaseSensitive(int column) throws SQLException { + return false; + } + + public boolean isSearchable(int column) throws SQLException { + if (column == 1) { + return true; + } + return false; + } + + public boolean isCurrency(int column) throws SQLException { + return false; + } + + public int isNullable(int column) throws SQLException { + if (column == 1) { + return columnNoNulls; + } + return columnNullable; + } + + public boolean isSigned(int column) throws SQLException { + ColumnMetaData meta = this.colMetaDataList.get(column - 1); + switch (meta.getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_INT: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return true; + default: + return false; + } + } + + public int getColumnDisplaySize(int column) throws SQLException { + return colMetaDataList.get(column - 1).getColSize(); + } + + public String getColumnLabel(int column) throws SQLException { + return colMetaDataList.get(column - 1).getColName(); + } + + public String getColumnName(int column) throws SQLException { + return colMetaDataList.get(column - 
1).getColName(); + } + + public String getSchemaName(int column) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getPrecision(int column) throws SQLException { + ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1); + switch (columnMetaData.getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + return 5; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return 9; + case TSDBConstants.TSDB_DATA_TYPE_BINARY: + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + return columnMetaData.getColSize(); + default: + return 0; + } + } + + public int getScale(int column) throws SQLException { + ColumnMetaData meta = this.colMetaDataList.get(column - 1); + switch (meta.getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + return 5; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return 9; + default: + return 0; + } + } + + public String getTableName(int column) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public String getCatalogName(int column) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getColumnType(int column) throws SQLException { + ColumnMetaData meta = this.colMetaDataList.get(column - 1); + switch (meta.getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: + return java.sql.Types.BIT; + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + return java.sql.Types.TINYINT; + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + return java.sql.Types.SMALLINT; + case TSDBConstants.TSDB_DATA_TYPE_INT: + return java.sql.Types.INTEGER; + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + return java.sql.Types.BIGINT; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + return java.sql.Types.FLOAT; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return java.sql.Types.DOUBLE; + case TSDBConstants.TSDB_DATA_TYPE_BINARY: + return java.sql.Types.CHAR; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + return java.sql.Types.BIGINT; + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + return java.sql.Types.CHAR; + } + throw new SQLException(TSDBConstants.INVALID_VARIABLES); + } + + public String getColumnTypeName(int column) throws SQLException { + ColumnMetaData meta = this.colMetaDataList.get(column - 1); + return TSDBConstants.DATATYPE_MAP.get(meta.getColType()); + } + + public boolean isReadOnly(int column) throws SQLException { + return true; + } + + public boolean isWritable(int column) throws SQLException { + return false; + } + + public boolean isDefinitelyWritable(int column) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public String getColumnClassName(int column) throws SQLException { + int columnType = getColumnType(column); + String columnClassName = ""; + switch (columnType) { + case Types.TIMESTAMP: + columnClassName = Timestamp.class.getName(); + break; + case Types.CHAR: + columnClassName = String.class.getName(); + break; + case Types.DOUBLE: + columnClassName = Double.class.getName(); + break; + case Types.FLOAT: + columnClassName = Float.class.getName(); + break; + case Types.BIGINT: + columnClassName = Long.class.getName(); + break; + case Types.INTEGER: + columnClassName = Integer.class.getName(); + break; + case Types.SMALLINT: + columnClassName = Short.class.getName(); + break; + case Types.TINYINT: + columnClassName = Byte.class.getName(); + break; + case Types.BIT: + columnClassName = Boolean.class.getName(); + break; + } + return columnClassName; + } +} diff 
--git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java new file mode 100644 index 000000000000..8efcac90001b --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -0,0 +1,222 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collections; + +public class TSDBResultSetRowData { + private ArrayList data = null; + private int colSize = 0; + + public TSDBResultSetRowData(int colSize) { + this.setColSize(colSize); + } + + public TSDBResultSetRowData() { + this.data = new ArrayList(); + this.setColSize(0); + } + + public void clear() { + if(this.data != null) { + this.data.clear(); + } + if (this.colSize == 0) { + return; + } + this.data = new ArrayList(colSize); + this.data.addAll(Collections.nCopies(this.colSize, null)); + } + + public boolean wasNull(int col) { + return data.get(col) == null; + } + + public void setBoolean(int col, boolean value) { + data.set(col, value); + } + + public boolean getBoolean(int col, int srcType) throws SQLException { + Object obj = data.get(col); + + switch(srcType) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: return (Boolean) obj; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj) == 1.0? Boolean.TRUE:Boolean.FALSE; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return ((Double) obj) == 1.0? Boolean.TRUE:Boolean.FALSE; + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return ((Byte) obj) == 1? Boolean.TRUE:Boolean.FALSE; + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:return ((Short)obj) == 1? Boolean.TRUE:Boolean.FALSE; + case TSDBConstants.TSDB_DATA_TYPE_INT: return ((Integer)obj) == 1? Boolean.TRUE:Boolean.FALSE; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return ((Long) obj) == 1L? Boolean.TRUE:Boolean.FALSE; + } + + return Boolean.TRUE; + } + + public void setByte(int col, byte value) { + data.set(col, value); + } + + public void setShort(int col, short value) { + data.set(col, value); + } + + public void setInt(int col, int value) { + data.set(col, value); + } + + public int getInt(int col, int srcType) throws SQLException { + Object obj = data.get(col); + + switch(srcType) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 
1:0; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj).intValue(); + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return ((Double)obj).intValue(); + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return (Byte) obj; + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:return (Short) obj; + case TSDBConstants.TSDB_DATA_TYPE_INT: return (Integer) obj; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return ((Long) obj).intValue(); + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Integer.parseInt((String) obj); + } + + return 0; + } + + public void setLong(int col, long value) { + data.set(col, value); + } + + public long getLong(int col, int srcType) throws SQLException { + Object obj = data.get(col); + + switch(srcType) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj).longValue(); + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return ((Double) obj).longValue(); + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return (Byte) obj; + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:return (Short) obj; + case TSDBConstants.TSDB_DATA_TYPE_INT: return (Integer) obj; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj; + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Long.parseLong((String) obj); + } + + return 0; + } + + public void setFloat(int col, float value) { + data.set(col, value); + } + + public float getFloat(int col, int srcType) throws SQLException { + Object obj = data.get(col); + + switch(srcType) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return (Float) obj; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return ((Double) obj).floatValue(); + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return (Byte) obj; + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: return (Short) obj; + case TSDBConstants.TSDB_DATA_TYPE_INT: return (Integer) obj; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj; + } + + return 0; + } + + public void setDouble(int col, double value) { + data.set(col, value); + } + + public double getDouble(int col, int srcType) throws SQLException { + Object obj = data.get(col); + + switch(srcType) { + case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 
1:0; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return (Float) obj; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return (Double) obj; + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return (Byte) obj; + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: return (Short) obj; + case TSDBConstants.TSDB_DATA_TYPE_INT: return (Integer) obj; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj; + } + + return 0; + } + + public void setString(int col, String value) { + data.set(col, value); + } + + public void setByteArray(int col, byte[] value) { + try { + data.set(col, new String(value, TaosGlobalConfig.getCharset())); + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * The original value may not be of string type; calling this method converts it to its string representation. + * @param col column index + * @param srcType type code of the source column, as defined in TSDBConstants + * @return the column value rendered as a String + * @throws SQLException + */ + public String getString(int col, int srcType) throws SQLException { + if (srcType == TSDBConstants.TSDB_DATA_TYPE_BINARY || srcType == TSDBConstants.TSDB_DATA_TYPE_NCHAR) { + return (String) data.get(col); + } else { + return String.valueOf(data.get(col)); + } + } + + public void setTimestamp(int col, long ts) { + data.set(col, ts); + } + + public Timestamp getTimestamp(int col) { + return new Timestamp((Long) data.get(col)); + } + + public Object get(int col) { + return data.get(col); + } + + public int getColSize() { + return colSize; + } + + public void setColSize(int colSize) { + this.colSize = colSize; + this.clear(); + } + + public ArrayList getData() { + return data; + } + + public void setData(ArrayList data) { + this.data = (ArrayList) data.clone(); + } + +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java new file mode 100644 index 000000000000..059962a7a120 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java @@ -0,0 +1,1172 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.*; +import java.util.Calendar; +import java.util.Map; + +/* + * TDengine only supports a subset of standard SQL, thus this implementation of the + * standard JDBC API contains some adjustments customized for certain + * compatibility needs.
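+ * + * A minimal, hypothetical usage sketch (not part of the original source; "statement" is assumed to be a TSDBStatement instance and "db.tbl" a placeholder table name): the wrapper only becomes useful once a concrete ResultSet has been injected. + * + * TSDBResultSetWrapper wrapper = new TSDBResultSetWrapper(); + * wrapper.setOriginalResultSet(statement.executeQuery("select * from db.tbl")); + * while (wrapper.next()) { + * System.out.println(wrapper.getString(1)); + * }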
+ */ +public class TSDBResultSetWrapper implements ResultSet { + + private ResultSet originalResultSet; + + public ResultSet getOriginalResultSet() { + return originalResultSet; + } + + public void setOriginalResultSet(ResultSet originalResultSet) { + this.originalResultSet = originalResultSet; + } + + @Override + public boolean next() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.next(); + } + + @Override + public void close() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + this.originalResultSet.close(); + } + + @Override + public boolean wasNull() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.wasNull(); + } + + @Override + public String getString(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getString(columnIndex); + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBoolean(columnIndex); + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getByte(columnIndex); + } + + @Override + public short getShort(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getShort(columnIndex); + } + + @Override + public int getInt(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getInt(columnIndex); + } + + @Override + public long getLong(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getLong(columnIndex); + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getFloat(columnIndex); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getDouble(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBigDecimal(columnIndex, scale); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + if (columnIndex <= 1) { + return this.originalResultSet.getBytes(columnIndex); + } else { + return null; + } + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result 
set is injected"); + } + return this.originalResultSet.getDate(columnIndex); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getTime(columnIndex); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getTimestamp(columnIndex); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getAsciiStream(columnIndex); + } + + @Override + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getUnicodeStream(columnIndex); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBinaryStream(columnIndex); + } + + @Override + public String getString(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getString(columnLabel); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBoolean(columnLabel); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getByte(columnLabel); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getShort(columnLabel); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getInt(columnLabel); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getLong(columnLabel); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getFloat(columnLabel); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getDouble(columnLabel); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBigDecimal(columnLabel, scale); + 
} + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBytes(columnLabel); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getDate(columnLabel); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getTime(columnLabel); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getTimestamp(columnLabel); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getAsciiStream(columnLabel); + } + + @Override + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getUnicodeStream(columnLabel); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBinaryStream(columnLabel); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + this.originalResultSet.clearWarnings(); + } + + @Override + public String getCursorName() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getCursorName(); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getMetaData(); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getObject(columnIndex); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getObject(columnLabel); + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.findColumn(columnLabel); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original 
result set is injected"); + } + return this.originalResultSet.getCharacterStream(columnIndex); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getCharacterStream(columnLabel); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBigDecimal(columnIndex); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getBigDecimal(columnLabel); + } + + @Override + public boolean isBeforeFirst() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.isBeforeFirst(); + } + + @Override + public boolean isAfterLast() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.isAfterLast(); + } + + @Override + public boolean isFirst() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.isFirst(); + } + + @Override + public boolean isLast() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.isLast(); + } + + @Override + public void beforeFirst() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + this.originalResultSet.beforeFirst(); + } + + @Override + public void afterLast() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + this.originalResultSet.afterLast(); + } + + @Override + public boolean first() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.first(); + } + + @Override + public boolean last() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.last(); + } + + @Override + public int getRow() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.getRow(); + } + + @Override + public boolean absolute(int row) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.absolute(row); + } + + @Override + public boolean relative(int rows) throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.relative(rows); + } + + @Override + public boolean previous() throws SQLException { + if (originalResultSet == null) { + throw new SQLException("No original result set is injected"); + } + return this.originalResultSet.previous(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + 
this.originalResultSet.setFetchDirection(direction); + } + + @Override + public int getFetchDirection() throws SQLException { + return this.originalResultSet.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + this.originalResultSet.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return this.originalResultSet.getFetchSize(); + } + + @Override + public int getType() throws SQLException { + return this.originalResultSet.getType(); + } + + @Override + public int getConcurrency() throws SQLException { + return this.originalResultSet.getConcurrency(); + } + + @Override + public boolean rowUpdated() throws SQLException { + return this.originalResultSet.rowUpdated(); + } + + @Override + public boolean rowInserted() throws SQLException { + return this.originalResultSet.rowInserted(); + } + + @Override + public boolean rowDeleted() throws SQLException { + return this.originalResultSet.rowDeleted(); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + this.originalResultSet.updateNull(columnIndex); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + this.originalResultSet.updateBoolean(columnIndex, x); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + this.originalResultSet.updateByte(columnIndex, x); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + this.originalResultSet.updateShort(columnIndex, x); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + this.originalResultSet.updateInt(columnIndex, x); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + this.originalResultSet.updateLong(columnIndex, x); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + this.originalResultSet.updateFloat(columnIndex, x); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + this.originalResultSet.updateDouble(columnIndex, x); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + this.originalResultSet.updateBigDecimal(columnIndex, x); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + this.originalResultSet.updateString(columnIndex, x); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + this.originalResultSet.updateBytes(columnIndex, x); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + this.originalResultSet.updateDate(columnIndex, x); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + this.originalResultSet.updateTime(columnIndex, x); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + this.originalResultSet.updateTimestamp(columnIndex, x); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + this.originalResultSet.updateAsciiStream(columnIndex, x, length); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + this.originalResultSet.updateBinaryStream(columnIndex, x, length); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + 
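+ // Update operations are likewise forwarded verbatim; whether they succeed is entirely up to the + // wrapped ResultSet implementation (the underlying TDengine result set is typically read-only).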
this.originalResultSet.updateCharacterStream(columnIndex, x, length); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + this.originalResultSet.updateObject(columnIndex, x, scaleOrLength); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + this.originalResultSet.updateObject(columnIndex, x); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + this.originalResultSet.updateNull(columnLabel); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + this.originalResultSet.updateBoolean(columnLabel, x); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + this.originalResultSet.updateByte(columnLabel, x); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + this.originalResultSet.updateShort(columnLabel, x); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + this.originalResultSet.updateInt(columnLabel, x); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + this.originalResultSet.updateLong(columnLabel, x); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + this.originalResultSet.updateFloat(columnLabel, x); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + this.originalResultSet.updateDouble(columnLabel, x); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + this.originalResultSet.updateBigDecimal(columnLabel, x); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + this.originalResultSet.updateString(columnLabel, x); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + this.originalResultSet.updateBytes(columnLabel, x); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + this.originalResultSet.updateDate(columnLabel, x); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + this.originalResultSet.updateTime(columnLabel, x); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + this.originalResultSet.updateTimestamp(columnLabel, x); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + this.originalResultSet.updateAsciiStream(columnLabel, x, length); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + this.originalResultSet.updateBinaryStream(columnLabel, x, length); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + this.originalResultSet.updateCharacterStream(columnLabel, reader, length); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + this.originalResultSet.updateObject(columnLabel, x, scaleOrLength); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + this.originalResultSet.updateObject(columnLabel, x); + } + + @Override + public void insertRow() throws SQLException { + this.originalResultSet.insertRow(); + } + + @Override + public void 
updateRow() throws SQLException { + this.originalResultSet.updateRow(); + } + + @Override + public void deleteRow() throws SQLException { + this.originalResultSet.deleteRow(); + } + + @Override + public void refreshRow() throws SQLException { + this.originalResultSet.refreshRow(); + } + + @Override + public void cancelRowUpdates() throws SQLException { + this.originalResultSet.cancelRowUpdates(); + } + + @Override + public void moveToInsertRow() throws SQLException { + this.originalResultSet.moveToInsertRow(); + } + + @Override + public void moveToCurrentRow() throws SQLException { + this.originalResultSet.moveToCurrentRow(); + } + + @Override + public Statement getStatement() throws SQLException { + return this.originalResultSet.getStatement(); + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + return this.originalResultSet.getObject(columnIndex, map); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + return this.originalResultSet.getRef(columnIndex); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + return this.originalResultSet.getBlob(columnIndex); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + return this.originalResultSet.getClob(columnIndex); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + return this.originalResultSet.getArray(columnIndex); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + return this.originalResultSet.getObject(columnLabel, map); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + return this.originalResultSet.getRef(columnLabel); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + return this.originalResultSet.getBlob(columnLabel); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + return this.originalResultSet.getClob(columnLabel); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + return this.originalResultSet.getArray(columnLabel); + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + return this.originalResultSet.getDate(columnIndex, cal); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return this.originalResultSet.getDate(columnLabel, cal); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + return this.originalResultSet.getTime(columnIndex, cal); + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return this.originalResultSet.getTime(columnLabel, cal); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + return this.originalResultSet.getTimestamp(columnIndex, cal); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return this.originalResultSet.getTimestamp(columnLabel, cal); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + return this.originalResultSet.getURL(columnIndex); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + return this.originalResultSet.getURL(columnLabel); + } + + @Override + public void updateRef(int columnIndex, Ref x) throws SQLException { + this.originalResultSet.updateRef(columnIndex, x); + } + + @Override + public void updateRef(String 
columnLabel, Ref x) throws SQLException { + this.originalResultSet.updateRef(columnLabel, x); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + this.originalResultSet.updateBlob(columnIndex, x); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + this.originalResultSet.updateBlob(columnLabel, x); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + this.originalResultSet.updateClob(columnIndex, x); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + this.originalResultSet.updateClob(columnLabel, x); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + this.originalResultSet.updateArray(columnIndex, x); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + this.originalResultSet.updateArray(columnLabel, x); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + return this.originalResultSet.getRowId(columnIndex); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + return this.originalResultSet.getRowId(columnLabel); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + this.originalResultSet.updateRowId(columnIndex, x); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + this.originalResultSet.updateRowId(columnLabel, x); + } + + @Override + public int getHoldability() throws SQLException { + return this.originalResultSet.getHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return this.originalResultSet.isClosed(); + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + this.originalResultSet.updateNString(columnIndex, nString); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + this.originalResultSet.updateNString(columnLabel, nString); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + this.originalResultSet.updateNClob(columnIndex, nClob); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + this.originalResultSet.updateNClob(columnLabel, nClob); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + return this.originalResultSet.getNClob(columnIndex); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + return this.originalResultSet.getNClob(columnLabel); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + return this.originalResultSet.getSQLXML(columnIndex); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + return this.originalResultSet.getSQLXML(columnLabel); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + this.originalResultSet.updateSQLXML(columnIndex, xmlObject); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + this.originalResultSet.updateSQLXML(columnLabel, xmlObject); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + return this.originalResultSet.getNString(columnIndex); + } + + @Override + public String getNString(String columnLabel) throws SQLException { + return 
this.originalResultSet.getNString(columnLabel); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + return this.originalResultSet.getNCharacterStream(columnIndex); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + return this.originalResultSet.getNCharacterStream(columnLabel); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + this.originalResultSet.updateNCharacterStream(columnIndex, x, length); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + this.originalResultSet.updateNCharacterStream(columnLabel, reader, length); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + this.originalResultSet.updateAsciiStream(columnIndex, x, length); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + this.originalResultSet.updateBinaryStream(columnIndex, x, length); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + this.originalResultSet.updateCharacterStream(columnIndex, x, length); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + this.originalResultSet.updateAsciiStream(columnLabel, x, length); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + this.originalResultSet.updateBinaryStream(columnLabel, x, length); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + this.originalResultSet.updateCharacterStream(columnLabel, reader, length); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + this.originalResultSet.updateBlob(columnIndex, inputStream, length); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + this.originalResultSet.updateBlob(columnLabel, inputStream, length); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + this.originalResultSet.updateClob(columnIndex, reader, length); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + this.originalResultSet.updateClob(columnLabel, reader, length); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + this.originalResultSet.updateNClob(columnIndex, reader, length); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + this.originalResultSet.updateNClob(columnLabel, reader, length); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + this.originalResultSet.updateNCharacterStream(columnIndex, x); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + this.originalResultSet.updateNCharacterStream(columnLabel, reader); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + this.originalResultSet.updateAsciiStream(columnIndex, x); + } + + @Override + public void 
updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + this.originalResultSet.updateBinaryStream(columnIndex, x); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + this.originalResultSet.updateCharacterStream(columnIndex, x); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + this.originalResultSet.updateAsciiStream(columnLabel, x); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + this.originalResultSet.updateBinaryStream(columnLabel, x); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + this.originalResultSet.updateCharacterStream(columnLabel, reader); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + this.originalResultSet.updateBlob(columnIndex, inputStream); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + this.originalResultSet.updateBlob(columnLabel, inputStream); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + this.originalResultSet.updateClob(columnIndex, reader); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + this.originalResultSet.updateClob(columnLabel, reader); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + this.originalResultSet.updateNClob(columnIndex, reader); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + this.originalResultSet.updateNClob(columnLabel, reader); + } + + public T getObject(int columnIndex, Class type) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public T getObject(String columnLabel, Class type) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + @Override + public T unwrap(Class iface) throws SQLException { + return this.originalResultSet.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return this.originalResultSet.isWrapperFor(iface); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java new file mode 100644 index 000000000000..7d4f4c4691c7 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -0,0 +1,244 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ *****************************************************************************/ +package com.taosdata.jdbc; + +import java.sql.*; +import java.util.ArrayList; +import java.util.List; + +public class TSDBStatement implements Statement { + private TSDBJNIConnector connecter = null; + + /** To store batched commands */ + protected List batchedArgs; + + /** Timeout for a query */ + protected int queryTimeout = 0; + + TSDBStatement(TSDBJNIConnector connecter) { + this.connecter = connecter; + } + + public T unwrap(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isWrapperFor(Class iface) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet executeQuery(String sql) throws SQLException { + this.connecter.executeQuery(sql); + + long resultSetPointer = this.connecter.getResultSet(); + + if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } else if (resultSetPointer == 0) { + return null; + } else { + return new TSDBResultSet(this.connecter, resultSetPointer); + } + } + + public int executeUpdate(String sql) throws SQLException { + return this.connecter.executeQuery(sql); + } + + public String getErrorMsg() { + return this.connecter.getErrMsg(); + } + + public void close() throws SQLException { + } + + public int getMaxFieldSize() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setMaxFieldSize(int max) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getMaxRows() throws SQLException { + // always set maxRows to zero, meaning unlimitted rows in a resultSet + return 0; + } + + public void setMaxRows(int max) throws SQLException { + // always set maxRows to zero, meaning unlimitted rows in a resultSet + } + + public void setEscapeProcessing(boolean enable) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getQueryTimeout() throws SQLException { + return queryTimeout; + } + + public void setQueryTimeout(int seconds) throws SQLException { + this.queryTimeout = seconds; + } + + public void cancel() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public SQLWarning getWarnings() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void clearWarnings() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setCursorName(String name) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean execute(String sql) throws SQLException { + return executeUpdate(sql) == 0; + } + + public ResultSet getResultSet() throws SQLException { + long resultSetPointer = connecter.getResultSet(); + TSDBResultSet resSet = null; + if (resultSetPointer != 0l) { + resSet = new TSDBResultSet(connecter, resultSetPointer); + } + return resSet; + } + + public int getUpdateCount() throws SQLException { + return this.connecter.getAffectedRows(); + } + + public boolean getMoreResults() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setFetchDirection(int direction) throws SQLException { + throw new 
SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getFetchDirection() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + /* + * used by spark + */ + public void setFetchSize(int rows) throws SQLException { + } + + /* + * used by spark + */ + public int getFetchSize() throws SQLException { + return 4096; + } + + public int getResultSetConcurrency() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getResultSetType() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void addBatch(String sql) throws SQLException { + if (batchedArgs == null) { + batchedArgs = new ArrayList(); + } + batchedArgs.add(sql); + } + + public void clearBatch() throws SQLException { + batchedArgs.clear(); + } + + public int[] executeBatch() throws SQLException { + if (batchedArgs == null) { + throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!")); + } else { + int[] res = new int[batchedArgs.size()]; + for (int i = 0; i < batchedArgs.size(); i++) { + res[i] = executeUpdate(batchedArgs.get(i)); + } + return res; + } + } + + public Connection getConnection() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean getMoreResults(int current) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet getGeneratedKeys() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean execute(String sql, String[] columnNames) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public int getResultSetHoldability() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isClosed() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void setPoolable(boolean poolable) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isPoolable() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public void closeOnCompletion() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public boolean isCloseOnCompletion() throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TaosGlobalConfig.java 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TaosGlobalConfig.java new file mode 100644 index 000000000000..171189763b21 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TaosGlobalConfig.java @@ -0,0 +1,30 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc; + +public class TaosGlobalConfig { + static String charset = ""; + + public static String getCharset() { + if (charset == null || charset.isEmpty()) { + charset = System.getProperty("file.encoding"); + } + return charset; + } + + public static void setCharset(String tsCharset) { + TaosGlobalConfig.charset = tsCharset; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java new file mode 100644 index 000000000000..7e144cbe0f03 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java @@ -0,0 +1,45 @@ +/*************************************************************************** + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + *****************************************************************************/ +package com.taosdata.jdbc.utils; + +import com.taosdata.jdbc.TSDBConnection; +import com.taosdata.jdbc.TSDBJNIConnector; + +import java.sql.Connection; +import java.sql.SQLException; + +public class SqlSyntaxValidator { + + private TSDBConnection tsdbConnection; + public SqlSyntaxValidator(Connection connection) { + this.tsdbConnection = (TSDBConnection) connection; + } + + public boolean validateSqlSyntax(String sql) throws SQLException { + + boolean res = false; + if (tsdbConnection == null || tsdbConnection.isClosed()) { + throw new SQLException("invalid connection"); + } else { + TSDBJNIConnector jniConnector = tsdbConnection.getConnection(); + if (jniConnector == null) { + throw new SQLException("jniConnector is null"); + } else { + res = jniConnector.validateCreateTableSql(sql); + } + } + return res; + } +} diff --git a/src/connector/python/python2/LICENSE b/src/connector/python/python2/LICENSE new file mode 100644 index 000000000000..79a9d730868b --- /dev/null +++ b/src/connector/python/python2/LICENSE @@ -0,0 +1,12 @@ + Copyright (c) 2019 TAOS Data, Inc. 
+ +This program is free software: you can use, redistribute, and/or modify +it under the terms of the GNU Affero General Public License, version 3 +or later ("AGPL"), as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/src/connector/python/python2/README.md b/src/connector/python/python2/README.md new file mode 100644 index 000000000000..70db6bba13a8 --- /dev/null +++ b/src/connector/python/python2/README.md @@ -0,0 +1 @@ +# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/python2/dist/taos-1.4.15.linux-x86_64.tar.gz b/src/connector/python/python2/dist/taos-1.4.15.linux-x86_64.tar.gz new file mode 100644 index 000000000000..b9c4e9e5718f Binary files /dev/null and b/src/connector/python/python2/dist/taos-1.4.15.linux-x86_64.tar.gz differ diff --git a/src/connector/python/python2/dist/taos-1.4.15.tar.gz b/src/connector/python/python2/dist/taos-1.4.15.tar.gz new file mode 100644 index 000000000000..c50a957a2a64 Binary files /dev/null and b/src/connector/python/python2/dist/taos-1.4.15.tar.gz differ diff --git a/src/connector/python/python2/setup.py b/src/connector/python/python2/setup.py new file mode 100644 index 000000000000..ae5ebad671bc --- /dev/null +++ b/src/connector/python/python2/setup.py @@ -0,0 +1,20 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="taos", + version="1.4.15", + author="Taosdata Inc.", + author_email="support@taosdata.com", + description="TDengine python client package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/pypa/sampleproject", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 2", + "Operating System :: Linux", + ], +) diff --git a/src/connector/python/python2/taos.egg-info/PKG-INFO b/src/connector/python/python2/taos.egg-info/PKG-INFO new file mode 100644 index 000000000000..ce6d8c58b219 --- /dev/null +++ b/src/connector/python/python2/taos.egg-info/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 2.1 +Name: taos +Version: 1.4.15 +Summary: TDengine python client package +Home-page: https://github.com/pypa/sampleproject +Author: Taosdata Inc. 
+Author-email: support@taosdata.com +License: UNKNOWN +Description: # TDengine python client interface +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 2 +Classifier: Operating System :: Linux +Description-Content-Type: text/markdown diff --git a/src/connector/python/python2/taos.egg-info/SOURCES.txt b/src/connector/python/python2/taos.egg-info/SOURCES.txt new file mode 100644 index 000000000000..23a38056c07f --- /dev/null +++ b/src/connector/python/python2/taos.egg-info/SOURCES.txt @@ -0,0 +1,13 @@ +README.md +setup.py +taos/__init__.py +taos/cinterface.py +taos/connection.py +taos/constants.py +taos/cursor.py +taos/dbapi.py +taos/error.py +taos.egg-info/PKG-INFO +taos.egg-info/SOURCES.txt +taos.egg-info/dependency_links.txt +taos.egg-info/top_level.txt \ No newline at end of file diff --git a/src/connector/python/python2/taos.egg-info/dependency_links.txt b/src/connector/python/python2/taos.egg-info/dependency_links.txt new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/src/connector/python/python2/taos.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/src/connector/python/python2/taos.egg-info/top_level.txt b/src/connector/python/python2/taos.egg-info/top_level.txt new file mode 100644 index 000000000000..6b5f0c008b9a --- /dev/null +++ b/src/connector/python/python2/taos.egg-info/top_level.txt @@ -0,0 +1 @@ +taos diff --git a/src/connector/python/python2/taos/__init__.py b/src/connector/python/python2/taos/__init__.py new file mode 100644 index 000000000000..4894488bc8d4 --- /dev/null +++ b/src/connector/python/python2/taos/__init__.py @@ -0,0 +1,24 @@ + +from .connection import TDengineConnection +from .cursor import TDengineCursor + +# Globals +apilevel = '2.0' +threadsafety = 0 +paramstyle = 'pyformat' + +__all__ = ['connection', 'cursor'] + +def connect(*args, **kwargs): + """ Function to return a TDengine connector object + + Current supporting keyword parameters: + @dsn: Data source name as string + @user: Username as string(optional) + @password: Password as string(optional) + @host: Hostname(optional) + @database: Database name(optional) + + @rtype: TDengineConnector + """ + return TDengineConnection(*args, **kwargs) \ No newline at end of file diff --git a/src/connector/python/python2/taos/cinterface.py b/src/connector/python/python2/taos/cinterface.py new file mode 100644 index 000000000000..e3eaa45b03c2 --- /dev/null +++ b/src/connector/python/python2/taos/cinterface.py @@ -0,0 +1,354 @@ +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli/1000.0) + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro/1000000.0) + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timstamp_converter = _convert_millisecond_to_datetime + if micro: + _timstamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) + else: + return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in 
ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + if num_of_rows > 0: + return [ None if ele.value == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele.value == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not 
None) + + res = [] + + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + else: + res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + except ValueError: + res.append(None) + + return res + # if num_of_rows > 0: + # for i in range(abs(num_of_rows)): + # try: + # res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + # except ValueError: + # res.append(None) + # return res + # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] + # else: + # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT : _crow_tinyint_to_python, + FieldType.C_SMALLINT : _crow_smallint_to_python, + FieldType.C_INT : _crow_int_to_python, + FieldType.C_BIGINT : _crow_bigint_to_python, + FieldType.C_FLOAT : _crow_float_to_python, + FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP : _crow_timestamp_to_python, + FieldType.C_NCHAR : _crow_nchar_to_python +} + +# Corresponding TAOS_FIELD structure in C +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 64), + ('bytes', ctypes.c_short), + ('type', ctypes.c_char)] + +# C interface class +class CTaosInterface(object): + + libtaos = ctypes.CDLL('libtaos.so') + + libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) + libtaos.taos_init.restype = None + libtaos.taos_connect.restype = ctypes.c_void_p + libtaos.taos_use_result.restype = ctypes.c_void_p + libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) + libtaos.taos_errstr.restype = ctypes.c_char_p + + def __init__(self, config=None): + ''' + Function to initialize the class + @host : str, hostname to connect + @user : str, username to connect to server + @password : str, password to connect to server + @db : str, default db to use when log in + @config : str, config directory + + @rtype : None + ''' + if config is None: + self._config = ctypes.c_char_p(None) + else: + try: + self._config = ctypes.c_char_p(config.encode('utf-8')) + except AttributeError: + raise AttributeError("config is expected as a str") + + if config != None: + CTaosInterface.libtaos.taos_options(3, self._config) + + CTaosInterface.libtaos.taos_init() + + @property + def config(self): + """ Get current config + """ + return self._config + + def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + ''' + Function to connect to server + + @rtype: c_void_p, TDengine handle + ''' + # host + try: + _host = ctypes.c_char_p(host.encode( + "utf-8")) if host != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("host is expected as a str") + + # user + try: + _user = ctypes.c_char_p(user.encode("utf-8")) + except AttributeError: + raise AttributeError("user is expected as a str") + + # password + try: + _password = ctypes.c_char_p(password.encode("utf-8")) + except AttributeError: + raise AttributeError("password is expected as a str") + + # db + try: + _db = ctypes.c_char_p( + db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("db is expected as a str") + + # port + try: + _port = ctypes.c_int(port) + 
except TypeError: + raise TypeError("port is expected as an int") + + connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( + _host, _user, _password, _db, _port)) + + if connection.value == None: + print('connect to TDengine failed') + # sys.exit(1) + else: + print('connect to TDengine success') + + return connection + + @staticmethod + def close(connection): + '''Close the TDengine handle + ''' + CTaosInterface.libtaos.taos_close(connection) + print('connection is closed') + + @staticmethod + def query(connection, sql): + '''Run SQL + + @sql: str, sql string to run + + @rtype: 0 on success and -1 on failure + ''' + try: + return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + except AttributeError: + raise AttributeError("sql is expected as a string") + finally: + CTaosInterface.libtaos.close(connection) + + @staticmethod + def affectedRows(connection): + """The affected rows after runing query + """ + return CTaosInterface.libtaos.taos_affected_rows(connection) + + @staticmethod + def useResult(connection): + '''Use result after calling self.query + ''' + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(connection)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return result, fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + + if num_of_rows == 0: + return None, 0 + + blocks = [None] * len(fields) + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) + + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(connection): + return CTaosInterface.libtaos.taos_field_count(connection) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # @staticmethod + # def getDataValue(data, dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == 
CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): + # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): + # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') + + @staticmethod + def errno(connection): + """Return the error number. + """ + return CTaosInterface.libtaos.taos_errno(connection) + + @staticmethod + def errStr(connection): + """Return the error styring + """ + return CTaosInterface.libtaos.taos_errstr(connection) \ No newline at end of file diff --git a/src/connector/python/python2/taos/connection.py b/src/connector/python/python2/taos/connection.py new file mode 100644 index 000000000000..ba2420955260 --- /dev/null +++ b/src/connector/python/python2/taos/connection.py @@ -0,0 +1,80 @@ +# from .cursor import TDengineCursor +from .cursor import TDengineCursor +from .cinterface import CTaosInterface + +class TDengineConnection(object): + """ TDengine connection object + """ + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + + def close(self): + """Close current connection. + """ + return CTaosInterface.close(self._conn) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. 
+ """ + result = self._chandle.useResult(self._conn)[0] + if result: + self._chandle.freeResult(result) + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() + print("Hello world") \ No newline at end of file diff --git a/src/connector/python/python2/taos/constants.py b/src/connector/python/python2/taos/constants.py new file mode 100644 index 000000000000..a994bceaf618 --- /dev/null +++ b/src/connector/python/python2/taos/constants.py @@ -0,0 +1,33 @@ +"""Constants in TDengine python +""" + +from .dbapi import * + +class FieldType(object): + """TDengine Field Types + """ + # type_code + C_NULL = 0 + C_BOOL = 1 + C_TINYINT = 2 + C_SMALLINT = 3 + C_INT = 4 + C_BIGINT = 5 + C_FLOAT = 6 + C_DOUBLE = 7 + C_BINARY = 8 + C_TIMESTAMP = 9 + C_NCHAR = 10 + # NULL value definition + # NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL = 0x02 + C_TINYINT_NULL = -128 + C_SMALLINT_NULL = -32768 + C_INT_NULL = -2147483648 + C_BIGINT_NULL = -9223372036854775808 + C_FLOAT_NULL = float('nan') + C_DOUBLE_NULL = float('nan') + C_BINARY_NULL = bytearray([int('0xff', 16)]) + # Time precision definition + C_TIMESTAMP_MILLI = 0 + C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/python2/taos/cursor.py b/src/connector/python/python2/taos/cursor.py new file mode 100644 index 000000000000..933efbde2283 --- /dev/null +++ b/src/connector/python/python2/taos/cursor.py @@ -0,0 +1,178 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineCursor(object): + """Database cursor which is used to manage the context of a fetch operation. + + Attributes: + .description: Read-only attribute consists of 7-item sequences: + + > name (mondatory) + > type_code (mondatory) + > display_size + > internal_size + > precision + > scale + > null_ok + + This attribute will be None for operations that do not return rows or + if the cursor has not had an operation invoked via the .execute*() method yet. + + .rowcount:This read-only attribute specifies the number of rows that the last + .execute*() produced (for DQL statements like SELECT) or affected + """ + + def __init__(self, connection=None): + self._description = None + self._rowcount = -1 + self._connection = None + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + + if connection is not None: + self._connection = connection + + def __iter__(self): + return self + + def next(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetch iterator") + + if self._block_rows <= self._block_iter: + block, self._block_rows = CTaosInterface.fetchBlock(self._result, self._fields) + if self._block_rows == 0: + raise StopIteration + self._block = list(map(tuple, zip(*block))) + self._block_iter = 0 + + data = self._block[self._block_iter] + self._block_iter += 1 + + return data + + @property + def description(self): + """Return the description of the object. + """ + return self._description + + @property + def rowcount(self): + """Return the rowcount of the object + """ + return self._rowcount + + def callproc(self, procname, *args): + """Call a stored database procedure with the given name. + + Void functionality since no stored procedures. + """ + pass + + def close(self): + """Close the cursor. 
+ """ + if self._connection is None: + return False + + self._connection.clear_result_set() + self._reset_result() + self._connection = None + + return True + + def execute(self, operation, params=None): + """Prepare and execute a database operation (query or command). + """ + if not operation: + return None + + if not self._connection: + # TODO : change the exception raised here + raise ProgrammingError("Cursor is not connected") + + self._connection.clear_result_set() + self._reset_result() + + stmt = operation + if params is not None: + pass + + res = CTaosInterface.query(self._connection._conn, stmt) + if res == 0: + if CTaosInterface.fieldsCount(self._connection._conn) == 0: + return CTaosInterface.affectedRows(self._connection._conn) + else: + self._result, self._fields = CTaosInterface.useResult(self._connection._conn) + return self._handle_result() + else: + raise ProgrammingError(CTaosInterface.errStr(self._connection._conn)) + + def executemany(self, operation, seq_of_parameters): + """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. + """ + pass + + def fetchone(self): + """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. + """ + pass + + def fetchmany(self): + pass + + def fetchall(self): + """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. + """ + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + if num_of_fields == 0: break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + + self._connection.clear_result_set() + + return list(map(tuple, zip(*buffer))) + + + + def nextset(self): + """ + """ + pass + + def setinputsize(self, sizes): + pass + + def setutputsize(self, size, column=None): + pass + + def _reset_result(self): + """Reset the result to unused version. + """ + self._description = None + self._rowcount = -1 + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + + def _handle_result(self): + """Handle the return result from query. + """ + self._description = [] + for ele in self._fields: + self._description.append((ele['name'], ele['type'], None, None, None, None, False)) + + return self._result \ No newline at end of file diff --git a/src/connector/python/python2/taos/dbapi.py b/src/connector/python/python2/taos/dbapi.py new file mode 100644 index 000000000000..f1c22bdb5122 --- /dev/null +++ b/src/connector/python/python2/taos/dbapi.py @@ -0,0 +1,38 @@ +"""Type Objects and Constructors. 
+""" + +import time +import datetime + +class DBAPITypeObject(object): + def __init__(self, *values): + self.values = values + + def __com__(self, other): + if other in self.values: + return 0 + if other < self.values: + return 1 + else: + return -1 + +Date = datetime.date +Time = datetime.time +Timestamp = datetime.datetime + +def DataFromTicks(ticks): + return Date(*time.localtime(ticks)[:3]) + +def TimeFromTicks(ticks): + return Time(*time.localtime(ticks)[3:6]) + +def TimestampFromTicks(ticks): + return Timestamp(*time.localtime(ticks)[:6]) + +Binary = bytes + +# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) +# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) +# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) +# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) +# ROWID = DBAPITypeObject() \ No newline at end of file diff --git a/src/connector/python/python2/taos/error.py b/src/connector/python/python2/taos/error.py new file mode 100644 index 000000000000..24508a72ed78 --- /dev/null +++ b/src/connector/python/python2/taos/error.py @@ -0,0 +1,57 @@ +"""Python exceptions +""" + +class Error(Exception): + def __init__(self, msg=None, errno=None): + self.msg = msg + self._full_msg = self.msg + self.errno = errno + + def __str__(self): + return self._full_msg + +class Warning(Exception): + """Exception raised for important warnings like data truncations while inserting. + """ + pass + +class InterfaceError(Error): + """Exception raised for errors that are related to the database interface rather than the database itself. + """ + pass + +class DatabaseError(Error): + """Exception raised for errors that are related to the database. + """ + pass + +class DataError(DatabaseError): + """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. + """ + pass + +class OperationalError(DatabaseError): + """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer + """ + pass + + +class IntegrityError(DatabaseError): + """Exception raised when the relational integrity of the database is affected. + """ + pass + +class InternalError(DatabaseError): + """Exception raised when the database encounters an internal error. + """ + pass + +class ProgrammingError(DatabaseError): + """Exception raised for programming errors. + """ + pass + +class NotSupportedError(DatabaseError): + """Exception raised in case a method or database API was used which is not supported by the database,. + """ + pass \ No newline at end of file diff --git a/src/connector/python/python3/LICENSE b/src/connector/python/python3/LICENSE new file mode 100644 index 000000000000..79a9d730868b --- /dev/null +++ b/src/connector/python/python3/LICENSE @@ -0,0 +1,12 @@ + Copyright (c) 2019 TAOS Data, Inc. + +This program is free software: you can use, redistribute, and/or modify +it under the terms of the GNU Affero General Public License, version 3 +or later ("AGPL"), as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/src/connector/python/python3/README.md b/src/connector/python/python3/README.md new file mode 100644 index 000000000000..70db6bba13a8 --- /dev/null +++ b/src/connector/python/python3/README.md @@ -0,0 +1 @@ +# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/python3/dist/taos-1.4.15.linux-x86_64.tar.gz b/src/connector/python/python3/dist/taos-1.4.15.linux-x86_64.tar.gz new file mode 100644 index 000000000000..422fa8db5033 Binary files /dev/null and b/src/connector/python/python3/dist/taos-1.4.15.linux-x86_64.tar.gz differ diff --git a/src/connector/python/python3/dist/taos-1.4.15.tar.gz b/src/connector/python/python3/dist/taos-1.4.15.tar.gz new file mode 100644 index 000000000000..51622a16a53a Binary files /dev/null and b/src/connector/python/python3/dist/taos-1.4.15.tar.gz differ diff --git a/src/connector/python/python3/setup.py b/src/connector/python/python3/setup.py new file mode 100644 index 000000000000..0669953ca320 --- /dev/null +++ b/src/connector/python/python3/setup.py @@ -0,0 +1,20 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="taos", + version="1.4.15", + author="Taosdata Inc.", + author_email="support@taosdata.com", + description="TDengine python client package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/pypa/sampleproject", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "Operating System :: Linux", + ], +) diff --git a/src/connector/python/python3/taos.egg-info/PKG-INFO b/src/connector/python/python3/taos.egg-info/PKG-INFO new file mode 100644 index 000000000000..b1a77c8ac735 --- /dev/null +++ b/src/connector/python/python3/taos.egg-info/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 2.1 +Name: taos +Version: 1.4.15 +Summary: TDengine python client package +Home-page: https://github.com/pypa/sampleproject +Author: Taosdata Inc. 
+Author-email: support@taosdata.com +License: UNKNOWN +Description: # TDengine python client interface +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: Linux +Description-Content-Type: text/markdown diff --git a/src/connector/python/python3/taos.egg-info/SOURCES.txt b/src/connector/python/python3/taos.egg-info/SOURCES.txt new file mode 100644 index 000000000000..23a38056c07f --- /dev/null +++ b/src/connector/python/python3/taos.egg-info/SOURCES.txt @@ -0,0 +1,13 @@ +README.md +setup.py +taos/__init__.py +taos/cinterface.py +taos/connection.py +taos/constants.py +taos/cursor.py +taos/dbapi.py +taos/error.py +taos.egg-info/PKG-INFO +taos.egg-info/SOURCES.txt +taos.egg-info/dependency_links.txt +taos.egg-info/top_level.txt \ No newline at end of file diff --git a/src/connector/python/python3/taos.egg-info/dependency_links.txt b/src/connector/python/python3/taos.egg-info/dependency_links.txt new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/src/connector/python/python3/taos.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/src/connector/python/python3/taos.egg-info/top_level.txt b/src/connector/python/python3/taos.egg-info/top_level.txt new file mode 100644 index 000000000000..6b5f0c008b9a --- /dev/null +++ b/src/connector/python/python3/taos.egg-info/top_level.txt @@ -0,0 +1 @@ +taos diff --git a/src/connector/python/python3/taos/__init__.py b/src/connector/python/python3/taos/__init__.py new file mode 100644 index 000000000000..4894488bc8d4 --- /dev/null +++ b/src/connector/python/python3/taos/__init__.py @@ -0,0 +1,24 @@ + +from .connection import TDengineConnection +from .cursor import TDengineCursor + +# Globals +apilevel = '2.0' +threadsafety = 0 +paramstyle = 'pyformat' + +__all__ = ['connection', 'cursor'] + +def connect(*args, **kwargs): + """ Function to return a TDengine connector object + + Current supporting keyword parameters: + @dsn: Data source name as string + @user: Username as string(optional) + @password: Password as string(optional) + @host: Hostname(optional) + @database: Database name(optional) + + @rtype: TDengineConnector + """ + return TDengineConnection(*args, **kwargs) \ No newline at end of file diff --git a/src/connector/python/python3/taos/cinterface.py b/src/connector/python/python3/taos/cinterface.py new file mode 100644 index 000000000000..c33fa7f2c572 --- /dev/null +++ b/src/connector/python/python3/taos/cinterface.py @@ -0,0 +1,370 @@ +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli/1000.0) + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro/1000000.0) + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timestamp_converter = _convert_millisecond_to_datetime + if micro: + _timestamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) + else: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in 
ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)] ] + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + if num_of_rows > 0: + return [ None if ele.value == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele.value == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not 
None) + + res = [] + + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + else: + res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + except ValueError: + res.append(None) + + return res + # if num_of_rows > 0: + # for i in range(abs(num_of_rows)): + # try: + # res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + # except ValueError: + # res.append(None) + # return res + # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] + # else: + # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT : _crow_tinyint_to_python, + FieldType.C_SMALLINT : _crow_smallint_to_python, + FieldType.C_INT : _crow_int_to_python, + FieldType.C_BIGINT : _crow_bigint_to_python, + FieldType.C_FLOAT : _crow_float_to_python, + FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP : _crow_timestamp_to_python, + FieldType.C_NCHAR : _crow_nchar_to_python +} + +# Corresponding TAOS_FIELD structure in C +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 64), + ('bytes', ctypes.c_short), + ('type', ctypes.c_char)] + +# C interface class +class CTaosInterface(object): + + libtaos = ctypes.CDLL('libtaos.so') + + libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) + libtaos.taos_init.restype = None + libtaos.taos_connect.restype = ctypes.c_void_p + libtaos.taos_use_result.restype = ctypes.c_void_p + libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) + libtaos.taos_errstr.restype = ctypes.c_char_p + + def __init__(self, config=None): + ''' + Function to initialize the class + @host : str, hostname to connect + @user : str, username to connect to server + @password : str, password to connect to server + @db : str, default db to use when log in + @config : str, config directory + + @rtype : None + ''' + if config is None: + self._config = ctypes.c_char_p(None) + else: + try: + self._config = ctypes.c_char_p(config.encode('utf-8')) + except AttributeError: + raise AttributeError("config is expected as a str") + + if config != None: + CTaosInterface.libtaos.taos_options(3, self._config) + + CTaosInterface.libtaos.taos_init() + + @property + def config(self): + """ Get current config + """ + return self._config + + def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + ''' + Function to connect to server + + @rtype: c_void_p, TDengine handle + ''' + # host + try: + _host = ctypes.c_char_p(host.encode( + "utf-8")) if host != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("host is expected as a str") + + # user + try: + _user = ctypes.c_char_p(user.encode("utf-8")) + except AttributeError: + raise AttributeError("user is expected as a str") + + # password + try: + _password = ctypes.c_char_p(password.encode("utf-8")) + except AttributeError: + raise AttributeError("password is expected as a str") + + # db + try: + _db = ctypes.c_char_p( + db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("db is expected as a str") + + # port + try: + _port = ctypes.c_int(port) + 
except TypeError: + raise TypeError("port is expected as an int") + + connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( + _host, _user, _password, _db, _port)) + + if connection.value == None: + print('connect to TDengine failed') + # sys.exit(1) + else: + print('connect to TDengine success') + + return connection + + @staticmethod + def close(connection): + '''Close the TDengine handle + ''' + CTaosInterface.libtaos.taos_close(connection) + print('connection is closed') + + @staticmethod + def query(connection, sql): + '''Run SQL + + @sql: str, sql string to run + + @rtype: 0 on success and -1 on failure + ''' + try: + return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + except AttributeError: + raise AttributeError("sql is expected as a string") + finally: + CTaosInterface.libtaos.close(connection) + + @staticmethod + def affectedRows(connection): + """The affected rows after runing query + """ + return CTaosInterface.libtaos.taos_affected_rows(connection) + + @staticmethod + def useResult(connection): + '''Use result after calling self.query + ''' + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(connection)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return result, fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + + if num_of_rows == 0: + return None, 0 + + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) + + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(connection): + return CTaosInterface.libtaos.taos_field_count(connection) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # @staticmethod + # def getDataValue(data, dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == 
CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): + # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): + # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') + + @staticmethod + def errno(connection): + """Return the error number. + """ + return CTaosInterface.libtaos.taos_errno(connection) + + @staticmethod + def errStr(connection): + """Return the error styring + """ + return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8') + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/python3/taos/connection.py b/src/connector/python/python3/taos/connection.py new file mode 100644 index 000000000000..ba2420955260 --- /dev/null +++ b/src/connector/python/python3/taos/connection.py @@ -0,0 +1,80 @@ +# from .cursor import TDengineCursor +from .cursor import TDengineCursor +from .cinterface import CTaosInterface + +class TDengineConnection(object): + """ TDengine connection object + """ + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + + def close(self): + """Close current connection. + """ + return CTaosInterface.close(self._conn) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. 
+ """ + result = self._chandle.useResult(self._conn)[0] + if result: + self._chandle.freeResult(result) + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() + print("Hello world") \ No newline at end of file diff --git a/src/connector/python/python3/taos/constants.py b/src/connector/python/python3/taos/constants.py new file mode 100644 index 000000000000..feb7050a40b6 --- /dev/null +++ b/src/connector/python/python3/taos/constants.py @@ -0,0 +1,33 @@ +"""Constants in TDengine python +""" + +from .dbapi import * + +class FieldType(object): + """TDengine Field Types + """ + # type_code + C_NULL = 0 + C_BOOL = 1 + C_TINYINT = 2 + C_SMALLINT = 3 + C_INT = 4 + C_BIGINT = 5 + C_FLOAT = 6 + C_DOUBLE = 7 + C_BINARY = 8 + C_TIMESTAMP = 9 + C_NCHAR = 10 + # NULL value definition + # NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL = 0x02 + C_TINYINT_NULL = -128 + C_SMALLINT_NULL = -32768 + C_INT_NULL = -2147483648 + C_BIGINT_NULL = -9223372036854775808 + C_FLOAT_NULL = float('nan') + C_DOUBLE_NULL = float('nan') + C_BINARY_NULL = bytearray([int('0xff', 16)]) + # Timestamp precision definition + C_TIMESTAMP_MILLI = 0 + C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/python3/taos/cursor.py b/src/connector/python/python3/taos/cursor.py new file mode 100644 index 000000000000..656a348472d3 --- /dev/null +++ b/src/connector/python/python3/taos/cursor.py @@ -0,0 +1,178 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineCursor(object): + """Database cursor which is used to manage the context of a fetch operation. + + Attributes: + .description: Read-only attribute consists of 7-item sequences: + + > name (mondatory) + > type_code (mondatory) + > display_size + > internal_size + > precision + > scale + > null_ok + + This attribute will be None for operations that do not return rows or + if the cursor has not had an operation invoked via the .execute*() method yet. + + .rowcount:This read-only attribute specifies the number of rows that the last + .execute*() produced (for DQL statements like SELECT) or affected + """ + + def __init__(self, connection=None): + self._description = None + self._rowcount = -1 + self._connection = None + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + + if connection is not None: + self._connection = connection + + def __iter__(self): + return self + + def __next__(self): + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetch iterator") + + if self._block_rows <= self._block_iter: + block, self._block_rows = CTaosInterface.fetchBlock(self._result, self._fields) + if self._block_rows == 0: + raise StopIteration + self._block = list(map(tuple, zip(*block))) + self._block_iter = 0 + + data = self._block[self._block_iter] + self._block_iter += 1 + + return data + + @property + def description(self): + """Return the description of the object. + """ + return self._description + + @property + def rowcount(self): + """Return the rowcount of the object + """ + return self._rowcount + + def callproc(self, procname, *args): + """Call a stored database procedure with the given name. + + Void functionality since no stored procedures. + """ + pass + + def close(self): + """Close the cursor. 
+ """ + if self._connection is None: + return False + + self._connection.clear_result_set() + self._reset_result() + self._connection = None + + return True + + def execute(self, operation, params=None): + """Prepare and execute a database operation (query or command). + """ + if not operation: + return None + + if not self._connection: + # TODO : change the exception raised here + raise ProgrammingError("Cursor is not connected") + + self._connection.clear_result_set() + self._reset_result() + + stmt = operation + if params is not None: + pass + + res = CTaosInterface.query(self._connection._conn, stmt) + if res == 0: + if CTaosInterface.fieldsCount(self._connection._conn) == 0: + return CTaosInterface.affectedRows(self._connection._conn) + else: + self._result, self._fields = CTaosInterface.useResult(self._connection._conn) + return self._handle_result() + else: + raise ProgrammingError(CTaosInterface.errStr(self._connection._conn)) + + def executemany(self, operation, seq_of_parameters): + """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. + """ + pass + + def fetchone(self): + """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. + """ + pass + + def fetchmany(self): + pass + + def fetchall(self): + """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. + """ + if self._result is None or self._fields is None: + raise OperationalError("Invalid use of fetchall") + + buffer = [[] for i in range(len(self._fields))] + self._rowcount = 0 + while True: + block, num_of_fields = CTaosInterface.fetchBlock(self._result, self._fields) + if num_of_fields == 0: break + self._rowcount += num_of_fields + for i in range(len(self._fields)): + buffer[i].extend(block[i]) + + self._connection.clear_result_set() + + return list(map(tuple, zip(*buffer))) + + + + def nextset(self): + """ + """ + pass + + def setinputsize(self, sizes): + pass + + def setutputsize(self, size, column=None): + pass + + def _reset_result(self): + """Reset the result to unused version. + """ + self._description = None + self._rowcount = -1 + self._result = None + self._fields = None + self._block = None + self._block_rows = -1 + self._block_iter = 0 + + def _handle_result(self): + """Handle the return result from query. + """ + self._description = [] + for ele in self._fields: + self._description.append((ele['name'], ele['type'], None, None, None, None, False)) + + return self._result \ No newline at end of file diff --git a/src/connector/python/python3/taos/dbapi.py b/src/connector/python/python3/taos/dbapi.py new file mode 100644 index 000000000000..f1c22bdb5122 --- /dev/null +++ b/src/connector/python/python3/taos/dbapi.py @@ -0,0 +1,38 @@ +"""Type Objects and Constructors. 
+""" + +import time +import datetime + +class DBAPITypeObject(object): + def __init__(self, *values): + self.values = values + + def __com__(self, other): + if other in self.values: + return 0 + if other < self.values: + return 1 + else: + return -1 + +Date = datetime.date +Time = datetime.time +Timestamp = datetime.datetime + +def DataFromTicks(ticks): + return Date(*time.localtime(ticks)[:3]) + +def TimeFromTicks(ticks): + return Time(*time.localtime(ticks)[3:6]) + +def TimestampFromTicks(ticks): + return Timestamp(*time.localtime(ticks)[:6]) + +Binary = bytes + +# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) +# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) +# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) +# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) +# ROWID = DBAPITypeObject() \ No newline at end of file diff --git a/src/connector/python/python3/taos/error.py b/src/connector/python/python3/taos/error.py new file mode 100644 index 000000000000..24508a72ed78 --- /dev/null +++ b/src/connector/python/python3/taos/error.py @@ -0,0 +1,57 @@ +"""Python exceptions +""" + +class Error(Exception): + def __init__(self, msg=None, errno=None): + self.msg = msg + self._full_msg = self.msg + self.errno = errno + + def __str__(self): + return self._full_msg + +class Warning(Exception): + """Exception raised for important warnings like data truncations while inserting. + """ + pass + +class InterfaceError(Error): + """Exception raised for errors that are related to the database interface rather than the database itself. + """ + pass + +class DatabaseError(Error): + """Exception raised for errors that are related to the database. + """ + pass + +class DataError(DatabaseError): + """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. + """ + pass + +class OperationalError(DatabaseError): + """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer + """ + pass + + +class IntegrityError(DatabaseError): + """Exception raised when the relational integrity of the database is affected. + """ + pass + +class InternalError(DatabaseError): + """Exception raised when the database encounters an internal error. + """ + pass + +class ProgrammingError(DatabaseError): + """Exception raised for programming errors. + """ + pass + +class NotSupportedError(DatabaseError): + """Exception raised in case a method or database API was used which is not supported by the database,. + """ + pass \ No newline at end of file diff --git a/src/inc/buildInfo.h b/src/inc/buildInfo.h new file mode 100755 index 000000000000..8d169d618d51 --- /dev/null +++ b/src/inc/buildInfo.h @@ -0,0 +1,7 @@ +#ifndef _TS_BUILD_H_ +#define _TS_BUILD_H_ + +extern const char tsVersion[]; +extern const char tsBuildInfo[]; + +#endif diff --git a/src/inc/ihash.h b/src/inc/ihash.h new file mode 100644 index 000000000000..8d9ad76e6f16 --- /dev/null +++ b/src/inc/ihash.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_IHASH_H +#define TDENGINE_IHASH_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +void *taosInitIntHash(int32_t maxSessions, int32_t dataSize, int32_t (*fp)(void *, int32_t)); + +void taosCleanUpIntHash(void *handle); + +char *taosGetIntHashData(void *handle, int32_t key); + +void taosDeleteIntHash(void *handle, int32_t key); + +char *taosAddIntHash(void *handle, int32_t key, char *pData); + +int32_t taosHashInt(void *handle, int32_t key); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_IHASH_H diff --git a/src/inc/lz4.h b/src/inc/lz4.h new file mode 100644 index 000000000000..d284d630043c --- /dev/null +++ b/src/inc/lz4.h @@ -0,0 +1,475 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Header File + * Copyright (C) 2011-2017, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef LZ4_H_2983827168210 +#define LZ4_H_2983827168210 + +/* --- Dependency --- */ +#include /* size_t */ + + +/** + Introduction + + LZ4 is lossless compression algorithm, providing compression speed at 400 MB/s per core, + scalable with multi-cores CPU. It features an extremely fast decoder, with speed in + multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. + + The LZ4 compression library provides in-memory compression and decompression functions. + Compression can be done in: + - a single step (described as Simple Functions) + - a single step, reusing a context (described in Advanced Functions) + - unbounded multiple steps (described as Streaming compression) + + lz4.h provides block compression functions. It gives full buffer control to user. + Decompressing an lz4-compressed block also requires metadata (such as compressed size). 
+ Each application is free to encode such metadata in whichever way it wants. + + An additional format, called LZ4 frame specification (doc/lz4_Frame_format.md), + take care of encoding standard metadata alongside LZ4-compressed blocks. + If your application requires interoperability, it's recommended to use it. + A library is provided to take care of it, see lz4frame.h. +*/ + +/*^*************************************************************** +* Export parameters +*****************************************************************/ +/* +* LZ4_DLL_EXPORT : +* Enable exporting of functions when building a Windows DLL +* LZ4LIB_API : +* Control library symbols visibility. +*/ +#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) +# define LZ4LIB_API __declspec(dllexport) +#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) +# define LZ4LIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#elif defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4LIB_API __attribute__ ((__visibility__ ("default"))) +#else +# define LZ4LIB_API +#endif + + +/*------ Version ------*/ +#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ +#define LZ4_VERSION_MINOR 8 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */ + +#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) + +#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +#define LZ4_QUOTE(str) #str +#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) +#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) + +LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; to be used when checking dll version */ +LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; to be used when checking dll version */ + + +/*-************************************ +* Tuning parameter +**************************************/ +/*! + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache + */ +#ifndef LZ4_MEMORY_USAGE +# define LZ4_MEMORY_USAGE 14 +#endif + +/*-************************************ +* Simple Functions +**************************************/ +/*! LZ4_compress_default() : + Compresses 'sourceSize' bytes from buffer 'source' + into already allocated 'dest' buffer of size 'maxDestSize'. + Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize). + It also runs faster, so it's a recommended setting. + If the function cannot compress 'source' into a more limited 'dest' budget, + compression stops *immediately*, and the function result is zero. + As a consequence, 'dest' content is not valid. + This function never writes outside 'dest' buffer, nor read outside 'source' buffer. + sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE + maxDestSize : full or partial size of buffer 'dest' (which must be already allocated) + return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize) + or 0 if compression fails */ +LZ4LIB_API int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize); + +/*! 
LZ4_decompress_safe() : + compressedSize : is the precise full size of the compressed block. + maxDecompressedSize : is the size of destination buffer, which must be already allocated. + return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize) + If destination buffer is not large enough, decoding will stop and output an error code (<0). + If the source stream is detected malformed, the function will stop decoding and return a negative result. + This function is protected against buffer overflow exploits, including malicious data packets. + It never writes outside output buffer, nor reads outside input buffer. +*/ +LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize); + + +/*-************************************ +* Advanced Functions +**************************************/ +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) + +/*! +LZ4_compressBound() : + Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) + This function is primarily useful for memory allocation purposes (destination buffer size). + Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). + Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize) + inputSize : max supported value is LZ4_MAX_INPUT_SIZE + return : maximum output size in a "worst case" scenario + or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) +*/ +LZ4LIB_API int LZ4_compressBound(int inputSize); + +/*! +LZ4_compress_fast() : + Same as LZ4_compress_default(), but allows to select an "acceleration" factor. + The larger the acceleration value, the faster the algorithm, but also the lesser the compression. + It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. + An acceleration value of "1" is the same as regular LZ4_compress_default() + Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1. +*/ +LZ4LIB_API int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration); + + +/*! +LZ4_compress_fast_extState() : + Same compression function, just using an externally allocated memory space to store compression state. + Use LZ4_sizeofState() to know how much memory must be allocated, + and allocate it on 8-bytes boundaries (using malloc() typically). + Then, provide it as 'void* state' to compression function. +*/ +LZ4LIB_API int LZ4_sizeofState(void); +LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration); + + +/*! +LZ4_compress_destSize() : + Reverse the logic, by compressing as much data as possible from 'source' buffer + into already allocated buffer 'dest' of size 'targetDestSize'. + This function either compresses the entire 'source' content into 'dest' if it's large enough, + or fill 'dest' buffer completely with as much data as possible from 'source'. + *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'. + New value is necessarily <= old value. 
+ return : Nb bytes written into 'dest' (necessarily <= targetDestSize) + or 0 if compression fails +*/ +LZ4LIB_API int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize); + + +/*! +LZ4_decompress_fast() : + originalSize : is the original and therefore uncompressed size + return : the number of bytes read from the source buffer (in other words, the compressed size) + If the source stream is detected malformed, the function will stop decoding and return a negative result. + Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes. + note : This function fully respect memory boundaries for properly formed compressed data. + It is a bit faster than LZ4_decompress_safe(). + However, it does not provide any protection against intentionally modified data stream (malicious input). + Use this function in trusted environment only (data to decode comes from a trusted source). +*/ +LZ4LIB_API int LZ4_decompress_fast (const char* source, char* dest, int originalSize); + +/*! +LZ4_decompress_safe_partial() : + This function decompress a compressed block of size 'compressedSize' at position 'source' + into destination buffer 'dest' of size 'maxDecompressedSize'. + The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached, + reducing decompression time. + return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize) + Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller. + Always control how many bytes were decoded. + If the source stream is detected malformed, the function will stop decoding and return a negative result. + This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets +*/ +LZ4LIB_API int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize); + + +/*-********************************************* +* Streaming Compression Functions +***********************************************/ +typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ + +/*! LZ4_createStream() and LZ4_freeStream() : + * LZ4_createStream() will allocate and initialize an `LZ4_stream_t` structure. + * LZ4_freeStream() releases its memory. + */ +LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); +LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); + +/*! LZ4_resetStream() : + * An LZ4_stream_t structure can be allocated once and re-used multiple times. + * Use this function to start compressing a new stream. + */ +LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); + +/*! LZ4_loadDict() : + * Use this function to load a static dictionary into LZ4_stream_t. + * Any previous data will be forgotten, only 'dictionary' will remain in memory. + * Loading a size of 0 is allowed, and is the same as reset. + * @return : dictionary size, in bytes (necessarily <= 64 KB) + */ +LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_compress_fast_continue() : + * Compress content into 'src' using data from previously compressed blocks, improving compression ratio. + * 'dst' buffer must be already allocated. + * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. 
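 *  For instance, a minimal double-buffer loop could look like the sketch below
 *  ('fin' is an assumed open FILE*, the 16 KB block size is arbitrary, and
 *  writing each compressed block to the output is elided) :
 *
 *      LZ4_stream_t* s = LZ4_createStream();
 *      static char   in[2][16 * 1024];                 // previous block stays in memory
 *      char          out[LZ4_COMPRESSBOUND(16 * 1024)];
 *      int           idx = 0;
 *      for (;;) {
 *          int n = (int) fread(in[idx], 1, sizeof(in[idx]), fin);
 *          if (n <= 0) break;
 *          int cSize = LZ4_compress_fast_continue(s, in[idx], out, n, (int) sizeof(out), 1);
 *          if (cSize <= 0) break;                      // after an error the stream must be reset or freed
 *          // ... write 'cSize' and then 'cSize' bytes of 'out' ...
 *          idx ^= 1;                                   // alternate buffers (double-buffer rule below)
 *      }
 *      LZ4_freeStream(s);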
+ * + * Important : Up to 64KB of previously compressed data is assumed to remain present and unmodified in memory ! + * Special 1 : If input buffer is a double-buffer, it can have any size, including < 64 KB. + * Special 2 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. + * + * @return : size of compressed block + * or 0 if there is an error (typically, compressed data cannot fit into 'dst') + * After an error, the stream status is invalid, it can only be reset or freed. + */ +LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_saveDict() : + * If previously compressed data block is not guaranteed to remain available at its current memory location, + * save it into a safer place (char* safeBuffer). + * Note : it's not necessary to call LZ4_loadDict() after LZ4_saveDict(), dictionary is immediately usable. + * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. + */ +LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize); + + +/*-********************************************** +* Streaming Decompression Functions +* Bufferless synchronous API +************************************************/ +typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* incomplete type (defined later) */ + +/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : + * creation / destruction of streaming decompression tracking structure. + * A tracking structure can be re-used multiple times sequentially. */ +LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); +LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); + +/*! LZ4_setStreamDecode() : + * An LZ4_streamDecode_t structure can be allocated once and re-used multiple times. + * Use this function to start decompression of a new stream of blocks. + * A dictionary can optionnally be set. Use NULL or size 0 for a simple reset order. + * @return : 1 if OK, 0 if error + */ +LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); + +/*! LZ4_decompress_*_continue() : + * These decoding functions allow decompression of consecutive blocks in "streaming" mode. + * A block is an unsplittable entity, it must be presented entirely to a decompression function. + * Decompression functions only accept one block at a time. + * Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB). + * + * Special : if application sets a ring buffer for decompression, it must respect one of the following conditions : + * - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions) + * In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB). + * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * maxBlockSize is implementation dependent. It's the maximum size of any single block. + * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including small ones ( < 64 KB). + * - _At least_ 64 KB + 8 bytes + maxBlockSize. + * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including larger than decoding buffer. 
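 *  For the simpler double-buffer case (no ring buffer), a decoder can mirror the
 *  encoder sketched above; 'cmp' and 'cSize' stand for one compressed block and
 *  its size, and consuming the decoded bytes is elided :
 *
 *      LZ4_streamDecode_t* ds = LZ4_createStreamDecode();
 *      static char dec[2][16 * 1024];                  // same two-buffer layout as the encoder
 *      int         idx = 0;
 *      // for each compressed block :
 *      int n = LZ4_decompress_safe_continue(ds, cmp, dec[idx], cSize, (int) sizeof(dec[idx]));
 *      if (n < 0) { /- malformed input : stop decoding -/ }
 *      // ... use the 'n' decoded bytes in dec[idx], then switch buffers : idx ^= 1; ...
 *      LZ4_freeStreamDecode(ds);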
+ * Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer, + * and indicate where it is saved using LZ4_setStreamDecode() before decompressing next block. +*/ +LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize); +LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize); + + +/*! LZ4_decompress_*_usingDict() : + * These decoding functions work the same as + * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() + * They are stand-alone, and don't need an LZ4_streamDecode_t structure. + */ +LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize); +LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize); + + +/*^********************************************** + * !!!!!! STATIC LINKING ONLY !!!!!! + ***********************************************/ +/*-************************************ + * Private definitions + ************************************** + * Do not use these definitions. + * They are exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. + * Using these definitions will expose code to API and/or ABI break in future versions of the library. + **************************************/ +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ + +#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +#include + +typedef struct { + uint32_t hashTable[LZ4_HASH_SIZE_U32]; + uint32_t currentOffset; + uint32_t initCheck; + const uint8_t* dictionary; + uint8_t* bufferStart; /* obsolete, used for slideInputBuffer */ + uint32_t dictSize; +} LZ4_stream_t_internal; + +typedef struct { + const uint8_t* externalDict; + size_t extDictSize; + const uint8_t* prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +#else + +typedef struct { + unsigned int hashTable[LZ4_HASH_SIZE_U32]; + unsigned int currentOffset; + unsigned int initCheck; + const unsigned char* dictionary; + unsigned char* bufferStart; /* obsolete, used for slideInputBuffer */ + unsigned int dictSize; +} LZ4_stream_t_internal; + +typedef struct { + const unsigned char* externalDict; + size_t extDictSize; + const unsigned char* prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +#endif + +/*! + * LZ4_stream_t : + * information structure to track an LZ4 stream. + * init this structure before first use. + * note : only use in association with static linking ! + * this definition is not API/ABI safe, + * it may change in a future version ! + */ +#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4) +#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) +union LZ4_stream_u { + unsigned long long table[LZ4_STREAMSIZE_U64]; + LZ4_stream_t_internal internal_donotuse; +} ; /* previously typedef'd to LZ4_stream_t */ + + +/*! + * LZ4_streamDecode_t : + * information structure to track an LZ4 stream during decompression. + * init this structure using LZ4_setStreamDecode (or memset()) before first use + * note : only use in association with static linking ! 
+ * this definition is not API/ABI safe, + * and may change in a future version ! + */ +#define LZ4_STREAMDECODESIZE_U64 4 +#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long)) +union LZ4_streamDecode_u { + unsigned long long table[LZ4_STREAMDECODESIZE_U64]; + LZ4_streamDecode_t_internal internal_donotuse; +} ; /* previously typedef'd to LZ4_streamDecode_t */ + + +/*-************************************ +* Obsolete Functions +**************************************/ + +/*! Deprecation warnings + Should deprecation warnings be a problem, + it is generally possible to disable them, + typically with -Wno-deprecated-declarations for gcc + or _CRT_SECURE_NO_WARNINGS in Visual. + Otherwise, it's also possible to define LZ4_DISABLE_DEPRECATE_WARNINGS */ +#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS +# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ +#else +# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined(__clang__) /* clang doesn't handle mixed C++11 and CNU attributes */ +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define LZ4_DEPRECATED(message) [[deprecated(message)]] +# elif (LZ4_GCC_VERSION >= 405) +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif (LZ4_GCC_VERSION >= 301) +# define LZ4_DEPRECATED(message) __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler") +# define LZ4_DEPRECATED(message) +# endif +#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ + +/* Obsolete compression functions */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress (const char* source, char* dest, int sourceSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/* Obsolete decompression functions */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast() instead") int LZ4_uncompress (const char* source, char* dest, int outputSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe() instead") int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); + +/* Obsolete streaming functions; use new streaming interface whenever possible */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer); +LZ4LIB_API 
LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state); + +/* Obsolete streaming decoding functions */ +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); +LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); + +#endif /* LZ4_H_2983827168210 */ + + +#if defined (__cplusplus) +} +#endif diff --git a/src/inc/sdb.h b/src/inc/sdb.h new file mode 100644 index 000000000000..39416a32165d --- /dev/null +++ b/src/inc/sdb.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_SDB_H +#define TDENGINE_SDB_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "taosmsg.h" +#include "tsdb.h" + +extern int sdbDebugFlag; +extern short sdbPeerPort; +extern short sdbSyncPort; +extern int sdbMaxNodes; +extern int sdbHbTimer; // seconds +extern char sdbZone[]; +extern char sdbMasterIp[]; +extern char sdbPrivateIp[]; +extern char * sdbStatusStr[]; +extern char * sdbRoleStr[]; +extern void * mnodeSdb; +extern int sdbExtConns; +extern int sdbMaster; +extern uint32_t sdbPublicIp; +extern uint32_t sdbMasterStartTime; +extern SIpList *pSdbIpList; +extern SIpList *pSdbPublicIpList; + +extern void (*sdbWorkAsMasterCallback)(); // this function pointer will be set by taosd + +enum _keytype { + SDB_KEYTYPE_STRING, SDB_KEYTYPE_UINT32, SDB_KEYTYPE_AUTO, SDB_KEYTYPE_RECYCLE, SDB_KEYTYPE_MAX +}; + +#define SDB_ROLE_UNAPPROVED 0 +#define SDB_ROLE_UNDECIDED 1 +#define SDB_ROLE_MASTER 2 +#define SDB_ROLE_SLAVE 3 + +#define SDB_STATUS_OFFLINE 0 +#define SDB_STATUS_UNSYNCED 1 +#define SDB_STATUS_SYNCING 2 +#define SDB_STATUS_SERVING 3 +#define SDB_STATUS_DELETED 4 + +enum _sdbaction { + SDB_TYPE_INSERT, + SDB_TYPE_DELETE, + SDB_TYPE_UPDATE, + SDB_TYPE_DECODE, + SDB_TYPE_ENCODE, + SDB_TYPE_BEFORE_BATCH_UPDATE, + SDB_TYPE_BATCH_UPDATE, + SDB_TYPE_AFTER_BATCH_UPDATE, + SDB_TYPE_RESET, + SDB_TYPE_DESTROY, + SDB_MAX_ACTION_TYPES +}; + +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, + void *(*appTool)(char, void *, char *, int, int *)); + +void *sdbGetRow(void *handle, void *key); + +int64_t sdbInsertRow(void *handle, void *row, int rowSize); + +int sdbDeleteRow(void *handle, void *key); + +int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated); + +void *sdbFetchRow(void *handle, void *pNode, void **ppRow); + +int sdbBatchUpdateRow(void *handle, void *row, int rowSize); + +int64_t sdbGetId(void *handle); + +int64_t sdbGetNumOfRows(void *handle); + +void sdbSaveSnapShot(void *handle); + +void sdbCloseTable(void *handle); + +int sdbRemovePeerByIp(uint32_t ip); + +int sdbInitPeers(char *directory); + +void sdbCleanUpPeers(); + +int sdbCfgNode(char *cont); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_SDB_H diff --git a/src/inc/shash.h 
b/src/inc/shash.h new file mode 100644 index 000000000000..2651a6b349eb --- /dev/null +++ b/src/inc/shash.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSHASH_H +#define TDENGINE_TSHASH_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +void *taosInitStrHash(uint32_t maxSessions, uint32_t dataSize, uint32_t (*fp)(void *, char *)); + +void taosCleanUpStrHash(void *handle); + +void *taosGetStrHashData(void *handle, char *string); + +void taosDeleteStrHash(void *handle, char *string); + +void taosDeleteStrHashNode(void *handle, char *string, void *pDeleteNode); + +void *taosAddStrHash(void *handle, char *string, char *pData); + +void *taosAddStrHashWithSize(void *handle, char *string, char *pData, int dataSize); + +uint32_t taosHashString(void *handle, char *string); + +uint32_t taosHashStringStep1(void *handle, char *string); + +char *taosVisitStrHashWithFp(void *handle, int (*fp)(char *)); + +void taosCleanUpStrHashWithFp(void *handle, void (*fp)(char *)); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSHASH_H diff --git a/src/inc/sql.y b/src/inc/sql.y new file mode 100755 index 000000000000..89b60b6b7219 --- /dev/null +++ b/src/inc/sql.y @@ -0,0 +1,620 @@ +//lemon parser file to generate sql parse by using finite-state-machine code used to parse sql +//usage: lemon sql.y +%token_prefix TK_ + +%token_type {SSQLToken} +%default_type {SSQLToken} +%extra_argument {SSqlInfo* pInfo} + +%fallback ID BOOL TINYINT SMALLINT INTEGER BIGINT FLOAT DOUBLE STRING TIMESTAMP BINARY NCHAR. + +%left OR. +%left AND. +%right NOT. +%left EQ NE ISNULL NOTNULL IS LIKE GLOB BETWEEN IN. +%left GT GE LT LE. +%left BITAND BITOR LSHIFT RSHIFT. +%left PLUS MINUS. +%left DIVIDE TIMES. +%left STAR SLASH REM. +%left CONCAT. +%right UMINUS UPLUS BITNOT. + +%include { +#include +#include +#include +#include +#include + +#include "tsql.h" +#include "tutil.h" +} + +%syntax_error { + pInfo->validSql = false; + int32_t outputBufLen = tListLen(pInfo->pzErrMsg); + int32_t len = 0; + + if(TOKEN.z) { + char msg[] = "syntax error near \"%s\""; + int32_t sqlLen = strlen(&TOKEN.z[0]); + + if (sqlLen + sizeof(msg)/sizeof(msg[0]) + 1 > outputBufLen) { + char tmpstr[128] = {0}; + memcpy(tmpstr, &TOKEN.z[0], sizeof(tmpstr)/sizeof(tmpstr[0]) - 1); + len = sprintf(pInfo->pzErrMsg, msg, tmpstr); + } else { + len = sprintf(pInfo->pzErrMsg, msg, &TOKEN.z[0]); + } + + } else { + len = sprintf(pInfo->pzErrMsg, "Incomplete SQL statement"); + } + + assert(len <= outputBufLen); +} + +%parse_accept {} + +program ::= cmd. {} + +//////////////////////////////////THE SHOW STATEMENT/////////////////////////////////////////// +cmd ::= SHOW DATABASES. { setDCLSQLElems(pInfo, SHOW_DATABASES, 0);} +cmd ::= SHOW MNODES. { setDCLSQLElems(pInfo, SHOW_MNODES, 0);} +cmd ::= SHOW DNODES. { setDCLSQLElems(pInfo, SHOW_DNODES, 0);} +cmd ::= SHOW USERS. { setDCLSQLElems(pInfo, SHOW_USERS, 0);} + +cmd ::= SHOW MODULES. 
{ setDCLSQLElems(pInfo, SHOW_MODULES, 0); } +cmd ::= SHOW QUERIES. { setDCLSQLElems(pInfo, SHOW_QUERIES, 0); } +cmd ::= SHOW CONNECTIONS.{ setDCLSQLElems(pInfo, SHOW_CONNECTIONS, 0);} +cmd ::= SHOW STREAMS. { setDCLSQLElems(pInfo, SHOW_STREAMS, 0); } +cmd ::= SHOW CONFIGS. { setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); } +cmd ::= SHOW SCORES. { setDCLSQLElems(pInfo, SHOW_SCORES, 0); } +cmd ::= SHOW GRANTS. { setDCLSQLElems(pInfo, SHOW_GRANTS, 0); } + +%type dbPrefix {SSQLToken} +dbPrefix(A) ::=. {A.n = 0;} +dbPrefix(A) ::= ids(X) DOT. {A = X; } + +%type cpxName {SSQLToken} +cpxName(A) ::= . {A.n = 0; } +cpxName(A) ::= DOT ids(Y). {A = Y; A.n += 1; } + +cmd ::= SHOW dbPrefix(X) TABLES. { + setDCLSQLElems(pInfo, SHOW_TABLES, 1, &X); +} + +cmd ::= SHOW dbPrefix(X) TABLES LIKE ids(Y). { + setDCLSQLElems(pInfo, SHOW_TABLES, 2, &X, &Y); +} + +cmd ::= SHOW dbPrefix(X) STABLES. { + setDCLSQLElems(pInfo, SHOW_STABLES, 1, &X); +} + +cmd ::= SHOW dbPrefix(X) STABLES LIKE ids(Y). { + SSQLToken token; + setDBName(&token, &X); + setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &Y); +} + +cmd ::= SHOW dbPrefix(X) VGROUPS. { + SSQLToken token; + setDBName(&token, &X); + setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token); +} + +//drop configure for tables +cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). { + X.n += Z.n; + setDCLSQLElems(pInfo, DROP_TABLE, 2, &X, &Y); +} + +cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDCLSQLElems(pInfo, DROP_DATABASE, 2, &X, &Y); } +cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, DROP_USER, 1, &X); } + +/////////////////////////////////THE USE STATEMENT////////////////////////////////////////// +cmd ::= USE ids(X). { setDCLSQLElems(pInfo, USE_DATABASE, 1, &X);} + +/////////////////////////////////THE DESCRIBE STATEMENT///////////////////////////////////// +cmd ::= DESCRIBE ids(X) cpxName(Y). { + X.n += Y.n; + setDCLSQLElems(pInfo, DESCRIBE_TABLE, 1, &X); +} + +/////////////////////////////////THE ALTER STATEMENT//////////////////////////////////////// +cmd ::= ALTER USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &X, &Y); } +cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &X, &Y);} +cmd ::= ALTER DNODE IP(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_DNODE, 2, &X, &Y); } +cmd ::= ALTER DNODE IP(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, ALTER_DNODE, 3, &X, &Y, &Z); } +cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &X); } +cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &X, &Y, &t);} + +// An IDENTIFIER can be a generic identifier, or one of several keywords. +// Any non-standard keyword can also be an identifier. +// And "ids" is an identifer-or-string. +%type ids {SSQLToken} +ids(A) ::= ID(X). {A = X; } +ids(A) ::= STRING(X). {A = X; } + +%type ifexists {SSQLToken} +ifexists(X) ::= IF EXISTS. {X.n = 1;} +ifexists(X) ::= . {X.n = 0;} + +%type ifnotexists {SSQLToken} +ifnotexists(X) ::= IF NOT EXISTS. {X.n = 1;} +ifnotexists(X) ::= . {X.n = 0;} + +/////////////////////////////////THE CREATE STATEMENT/////////////////////////////////////// +cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, CREATE_DATABASE, &X, &Y, &Z);} +cmd ::= CREATE USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, CREATE_USER, 2, &X, &Y);} + +%type keep {tVariantList*} +%destructor keep {tVariantListDestroy($$);} +keep(Y) ::= . {Y = 0; } +keep(Y) ::= KEEP tagitemlist(X). {Y = X; } + +replica(Y) ::= . 
{Y.n = 0; } +replica(Y) ::= REPLICA INTEGER(X). {Y = X; } + +day(Y) ::= . {Y.n = 0; } +day(Y) ::= DAYS INTEGER(X). {Y = X; } + +rows(Y) ::= ROWS INTEGER(X). {Y = X; } +rows(Y) ::= . {Y.n = 0; } + +cache(Y) ::= CACHE INTEGER(X). {Y = X; } +cache(Y) ::= . {Y.n = 0; } +ablocks(Y) ::= ABLOCKS ID(X). {Y = X; } +ablocks(Y) ::= . {Y.n = 0; } +tblocks(Y) ::= TBLOCKS INTEGER(X). {Y = X; } +tblocks(Y) ::= . {Y.n = 0; } +tables(Y) ::= TABLES INTEGER(X). {Y = X; } +tables(Y) ::= . {Y.n = 0; } +ctime(Y) ::= CTIME INTEGER(X). {Y = X; } +ctime(Y) ::= . {Y.n = 0; } +clog(Y) ::= CLOG INTEGER(X). {Y = X; } +clog(Y) ::= . {Y.n = 0; } +comp(Y) ::= COMP INTEGER(X). {Y = X; } +comp(Y) ::= . {Y.n = 0; } +prec(Y) ::= PRECISION ids(X). {Y = X; } +prec(Y) ::= . {Y.n = 0; } + +%type db_optr {SCreateDBSQL} +db_optr(Y) ::= replica(A) day(B) keep(C) rows(D) cache(E) ablocks(F) tblocks(K) tables(G) ctime(H) clog(I) comp(J) prec(L). { + Y.nReplica = (A.n > 0)? atoi(A.z):-1; + Y.nDays = (B.n > 0)? atoi(B.z):-1; + Y.nRowsInFileBlock = (D.n > 0)? atoi(D.z):-1; + + Y.nCacheBlockSize = (E.n > 0)? atoi(E.z):-1; + Y.nCacheNumOfBlocks = (F.n > 0)? strtod(F.z, NULL):-1; + Y.numOfBlocksPerTable = (K.n > 0)? atoi(K.z):-1; + Y.nTablesPerVnode = (G.n > 0)? atoi(G.z):-1; + Y.commitTime = (H.n > 0)? atoi(H.z):-1; + Y.commitLog = (I.n > 0)? atoi(I.z):-1; + Y.compressionLevel = (J.n > 0)? atoi(J.z):-1; + + Y.keep = C; + Y.precision = L; +} + +%type alter_db_optr {SCreateDBSQL} +alter_db_optr(Y) ::= replica(A). { + Y.nReplica = (A.n > 0)? atoi(A.z):0; +} + +%type typename {TAOS_FIELD} +typename(A) ::= ids(X). { tSQLSetColumnType (&A, &X); } + +//define binary type, e.g., binary(10), nchar(10) +typename(A) ::= ids(X) LP signed(Y) RP. { + X.type = -Y; // negative value of name length + tSQLSetColumnType(&A, &X); +} + +%type signed {int} +signed(A) ::= INTEGER(X). { A = atoi(X.z); } +signed(A) ::= PLUS INTEGER(X). { A = strtol(X.z, NULL, 10); } +signed(A) ::= MINUS INTEGER(X). { A = -strtol(X.z, NULL, 10);} + +////////////////////////////////// The CREATE TABLE statement /////////////////////////////// +cmd ::= CREATE TABLE ifnotexists(Y) ids(X) cpxName(Z) create_table_args. { + X.n += Z.n; + setCreatedMeterName(pInfo, &X, &Y); +} + +%type create_table_args{SCreateTableSQL*} +create_table_args(A) ::= LP columnlist(X) RP. { + A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); + setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METER); +} + +// create metric +create_table_args(A) ::= LP columnlist(X) RP TAGS LP columnlist(Y) RP. { + A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); + setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METRIC); +} + +// create meter by using metric +// create meter meter_name using metric_name tags(tag_values1, tag_values2) +create_table_args(A) ::= USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. { + X.n += F.n; + A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_METER_FROM_METRIC); + setSQLInfo(pInfo, A, NULL, TSQL_CREATE_METER_FROM_METRIC); +} + +// create stream +// create table table_name as select count(*) from metric_name interval(time) +create_table_args(A) ::= AS select(S). { + A = tSetCreateSQLElems(NULL, NULL, NULL, NULL, S, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, A, NULL, TSQL_CREATE_STREAM); +} + +%type column{TAOS_FIELD} +%type columnlist{tFieldList*} +columnlist(A) ::= columnlist(X) COMMA column(Y). {A = tFieldListAppend(X, &Y); } +columnlist(A) ::= column(X). 
{A = tFieldListAppend(NULL, &X);} + +// The information used for a column is the name and type of column: +// tinyint smallint int bigint float double bool timestamp binary(x) nchar(x) +column(A) ::= ids(X) typename(Y). { + tSQLSetColumnInfo(&A, &X, &Y); +} + +%type tagitemlist {tVariantList*} +%destructor tagitemlist {tVariantListDestroy($$);} + +%type tagitem {tVariant} +tagitemlist(A) ::= tagitemlist(X) COMMA tagitem(Y). { A = tVariantListAppend(X, &Y, -1); } +tagitemlist(A) ::= tagitem(X). { A = tVariantListAppend(NULL, &X, -1); } + +tagitem(A) ::= INTEGER(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= FLOAT(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= STRING(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= BOOL(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= NULL(X). { X.type = TK_STRING; toTSDBType(X.type); tVariantCreate(&A, &X); } + +tagitem(A) ::= MINUS(X) INTEGER(Y).{ + X.n += Y.n; + X.type = Y.type; + toTSDBType(X.type); + tVariantCreate(&A, &X); +} + +tagitem(A) ::= MINUS(X) FLOAT(Y). { + X.n += Y.n; + X.type = Y.type; + toTSDBType(X.type); + tVariantCreate(&A, &X); +} + + +//////////////////////// The SELECT statement ///////////////////////////////// +cmd ::= select(X). { + setSQLInfo(pInfo, X, NULL, TSQL_QUERY_METER); +} + +%type select {SQuerySQL*} +%destructor select {destroyQuerySql($$);} +select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { + A = tSetQuerySQLElems(&T, W, &X, Y, P, Z, &K, &S, F, &L, &G); +} + +// selcollist is a list of expressions that are to become the return +// values of the SELECT statement. The "*" in statements like +// "SELECT * FROM ..." is encoded as a special expression with an opcode of TK_ALL. +%type selcollist {tSQLExprList*} +%destructor selcollist {tSQLExprListDestroy($$);} + +%type sclp {tSQLExprList*} +%destructor sclp {tSQLExprListDestroy($$);} +sclp(A) ::= selcollist(X) COMMA. {A = X;} +sclp(A) ::= . {A = 0;} +selcollist(A) ::= sclp(P) expr(X) as(Y). { + A = tSQLExprListAppend(P, X, Y.n?&Y:0); +} + +selcollist(A) ::= sclp(P) STAR. { + tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); + A = tSQLExprListAppend(P, pNode, 0); +} + +selcollist(A) ::= sclp(P) ID(X) DOT STAR. { + tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); + A = tSQLExprListAppend(P, pNode, 0); +} + +// An option "AS " phrase that can follow one of the expressions that +// define the result set, or one of the tables in the FROM clause. +// +%type as {SSQLToken} +as(X) ::= AS ids(Y). { X = Y; } +as(X) ::= ids(Y). { X = Y; } +as(X) ::= . { X.n = 0; } + +// A complete FROM clause. +%type from {SSQLToken} +// current not support query from no-table +from(A) ::= FROM ids(X) cpxName(Y). {A = X; A.n += Y.n;} + +// The value of interval should be the form of "number+[a,s,m,h,d,n,y]" or "now" +%type tmvar {SSQLToken} +tmvar(A) ::= VARIABLE(X). {A = X;} + +%type interval_opt {SSQLToken} +interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N = E; } +interval_opt(N) ::= . {N.n = 0; } + +%type fill_opt {tVariantList*} +%destructor fill_opt {tVariantListDestroy($$);} +fill_opt(N) ::= . {N = 0; } +fill_opt(N) ::= FILL LP ID(Y) COMMA tagitemlist(X) RP. { + tVariant A = {0}; + toTSDBType(Y.type); + tVariantCreate(&A, &Y); + + tVariantListInsert(X, &A, -1, 0); + N = X; +} + +fill_opt(N) ::= FILL LP ID(Y) RP. 
{ + tVariant A = {0}; + toTSDBType(Y.type); + tVariantCreate(&A, &Y); + + N = tVariantListAppend(NULL, &A, -1); +} + +%type sliding_opt {SSQLToken} +sliding_opt(K) ::= SLIDING LP tmvar(E) RP. {K = E; } +sliding_opt(K) ::= . {K.n = 0; } + +%type orderby_opt {tVariantList*} +%destructor orderby_opt {tVariantListDestroy($$);} + +%type sortlist {tVariantList*} +%destructor sortlist {tVariantListDestroy($$);} + +%type sortitem {tVariant} +%destructor sortitem {tVariantDestroy(&$$);} + +orderby_opt(A) ::= . {A = 0;} +orderby_opt(A) ::= ORDER BY sortlist(X). {A = X;} + +sortlist(A) ::= sortlist(X) COMMA item(Y) sortorder(Z). { + A = tVariantListAppend(X, &Y, Z); +} + +%type item {tVariant} +sortlist(A) ::= item(Y) sortorder(Z). { + A = tVariantListAppend(NULL, &Y, Z); +} + +item(A) ::= ids(X). { + toTSDBType(X.type); + tVariantCreate(&A, &X); +} + +%type sortorder {int} +sortorder(A) ::= ASC. {A = TSQL_SO_ASC; } +sortorder(A) ::= DESC. {A = TSQL_SO_DESC;} +sortorder(A) ::= . {A = TSQL_SO_ASC;} //default is descend order + +//group by clause +%type groupby_opt {tVariantList*} +%destructor groupby_opt {tVariantListDestroy($$);} +%type grouplist {tVariantList*} +%destructor grouplist {tVariantListDestroy($$);} + +groupby_opt(A) ::= . {A = 0;} +groupby_opt(A) ::= GROUP BY grouplist(X). {A = X;} + +grouplist(A) ::= grouplist(X) COMMA item(Y). { + A = tVariantListAppend(X, &Y, -1); +} + +grouplist(A) ::= item(X). { + A = tVariantListAppend(NULL, &X, -1); +} + +//having clause, ignore the input condition in having +%type having_opt {tSQLExpr*} +%destructor having_opt {tSQLExprDestroy($$);} +having_opt(A) ::=. {A = 0;} +having_opt(A) ::= HAVING expr(X). {A = X;} + +//limit-offset subclause +%type limit_opt {SLimitVal} +limit_opt(A) ::= . {A.limit = -1; A.offset = 0;} +limit_opt(A) ::= LIMIT signed(X). {A.limit = X; A.offset = 0;} +limit_opt(A) ::= LIMIT signed(X) OFFSET signed(Y). + {A.limit = X; A.offset = Y;} +limit_opt(A) ::= LIMIT signed(X) COMMA signed(Y). + {A.limit = Y; A.offset = X;} + +%type slimit_opt {SLimitVal} +slimit_opt(A) ::= . {A.limit = -1; A.offset = 0;} +slimit_opt(A) ::= SLIMIT signed(X). {A.limit = X; A.offset = 0;} +slimit_opt(A) ::= SLIMIT signed(X) SOFFSET signed(Y). + {A.limit = X; A.offset = Y;} +slimit_opt(A) ::= SLIMIT signed(X) COMMA signed(Y). + {A.limit = Y; A.offset = X;} + +%type where_opt {tSQLExpr*} +%destructor where_opt {tSQLExprDestroy($$);} + +where_opt(A) ::= . {A = 0;} +where_opt(A) ::= WHERE expr(X). {A = X;} + +/////////////////////////// Expression Processing ///////////////////////////// +// +%type expr {tSQLExpr*} +%destructor expr {tSQLExprDestroy($$);} + +expr(A) ::= LP expr(X) RP. {A = X; } + +expr(A) ::= ID(X). {A = tSQLExprIdValueCreate(&X, TK_ID);} +expr(A) ::= ID(X) DOT ID(Y). {X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ID);} + +expr(A) ::= INTEGER(X). {A = tSQLExprIdValueCreate(&X, TK_INTEGER);} +expr(A) ::= MINUS(X) INTEGER(Y). {X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);} +expr(A) ::= PLUS(X) INTEGER(Y). {X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);} +expr(A) ::= FLOAT(X). {A = tSQLExprIdValueCreate(&X, TK_FLOAT);} +expr(A) ::= MINUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);} +expr(A) ::= PLUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);} +expr(A) ::= STRING(X). {A = tSQLExprIdValueCreate(&X, TK_STRING);} +expr(A) ::= NOW(X). {A = tSQLExprIdValueCreate(&X, TK_NOW); } +expr(A) ::= VARIABLE(X). 
{A = tSQLExprIdValueCreate(&X, TK_VARIABLE);} +expr(A) ::= BOOL(X). {A = tSQLExprIdValueCreate(&X, TK_BOOL);} +// normal functions: min(x) +expr(A) ::= ID(X) LP exprlist(Y) RP(E). { + A = tSQLExprCreateFunction(Y, &X, &E, X.type); +} + +// this is for: count(*)/first(*)/last(*) operation +expr(A) ::= ID(X) LP STAR RP(Y). { + A = tSQLExprCreateFunction(NULL, &X, &Y, X.type); +} + +//binary expression: a+2, b+3 +expr(A) ::= expr(X) AND expr(Y). {A = tSQLExprCreate(X, Y, TK_AND);} +expr(A) ::= expr(X) OR expr(Y). {A = tSQLExprCreate(X, Y, TK_OR); } + +//binary relational expression +expr(A) ::= expr(X) LT expr(Y). {A = tSQLExprCreate(X, Y, TK_LT);} +expr(A) ::= expr(X) GT expr(Y). {A = tSQLExprCreate(X, Y, TK_GT);} +expr(A) ::= expr(X) LE expr(Y). {A = tSQLExprCreate(X, Y, TK_LE);} +expr(A) ::= expr(X) GE expr(Y). {A = tSQLExprCreate(X, Y, TK_GE);} +expr(A) ::= expr(X) NE expr(Y). {A = tSQLExprCreate(X, Y, TK_NE);} +expr(A) ::= expr(X) EQ expr(Y). {A = tSQLExprCreate(X, Y, TK_EQ);} + +//binary arithmetic expression +expr(A) ::= expr(X) PLUS expr(Y). {A = tSQLExprCreate(X, Y, TK_PLUS); } +expr(A) ::= expr(X) MINUS expr(Y). {A = tSQLExprCreate(X, Y, TK_MINUS); } +expr(A) ::= expr(X) STAR expr(Y). {A = tSQLExprCreate(X, Y, TK_STAR); } +expr(A) ::= expr(X) SLASH expr(Y). {A = tSQLExprCreate(X, Y, TK_DIVIDE);} +expr(A) ::= expr(X) REM expr(Y). {A = tSQLExprCreate(X, Y, TK_REM); } + +//like expression +expr(A) ::= expr(X) LIKE expr(Y). {A = tSQLExprCreate(X, Y, TK_LIKE); } + +//in expression +expr(A) ::= expr(X) IN LP exprlist(Y) RP. {A = tSQLExprCreate(X, (tSQLExpr*)Y, TK_IN); } + +%type exprlist {tSQLExprList*} +%destructor exprlist {tSQLExprListDestroy($$);} + +%type expritem {tSQLExpr*} +%destructor expritem {tSQLExprDestroy($$);} + +exprlist(A) ::= exprlist(X) COMMA expritem(Y). {A = tSQLExprListAppend(X,Y,0);} +exprlist(A) ::= expritem(X). {A = tSQLExprListAppend(0,X,0);} +expritem(A) ::= expr(X). {A = X;} +expritem(A) ::= . {A = 0;} + +////////////////////////// The INSERT command ///////////////////////////////// +// add support "values() values() values() tags()" operation.... +cmd ::= INSERT INTO cpxName(X) insert_value_list(K). { + tSetInsertSQLElems(pInfo, &X, K); +} + +%type insert_value_list {tSQLExprListList*} +insert_value_list(X) ::= VALUES LP itemlist(Y) RP. {X = tSQLListListAppend(NULL, Y);} +insert_value_list(X) ::= insert_value_list(K) VALUES LP itemlist(Y) RP. +{X = tSQLListListAppend(K, Y);} + +//cmd ::= INSERT INTO cpxName(X) select(S). +// {sqliteInsert(pParse, sqliteSrcListAppend(0,&X,&D), 0, S, F, R);} + +%type itemlist {tSQLExprList*} +%destructor itemlist {tSQLExprListDestroy($$);} + +itemlist(A) ::= itemlist(X) COMMA expr(Y). {A = tSQLExprListAppend(X,Y,0);} +itemlist(A) ::= expr(X). {A = tSQLExprListAppend(0,X,0);} + +///////////////////////////////////reset query cache////////////////////////////////////// +cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);} + +///////////////////////////////////ALTER TABLE statement////////////////////////////////// +cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). { + X.n += F.n; + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_ADD_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN); +} + +cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). 
{ + X.n += F.n; + + toTSDBType(A.type); + + tVariant V; + tVariantCreate(&V, &A); + + tVariantList* K = tVariantListAppend(NULL, &V, -1); + + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, ALTER_TABLE_DROP_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN); +} + +//////////////////////////////////ALTER TAGS statement///////////////////////////////////// +cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). { + X.n += Y.n; + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_TAGS_ADD); + setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD); +} +cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). { + X.n += Z.n; + + toTSDBType(Y.type); + + tVariant V; + tVariantCreate(&V, &Y); + + tVariantList* A = tVariantListAppend(NULL, &V, -1); + + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_DROP); + setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP); +} + +cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { + X.n += F.n; + + tVariant V; + toTSDBType(Y.type); + tVariantCreate(&V, &Y); + + tVariantList* A = tVariantListAppend(NULL, &V, -1); + + toTSDBType(Z.type); + tVariantCreate(&V, &Z); + A = tVariantListAppend(A, &V, -1); + + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_CHG); + setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG); +} + +cmd ::= ALTER TABLE ids(X) cpxName(F) SET ids(Y) EQ tagitem(Z). { + X.n += F.n; + + tVariant V; + toTSDBType(Y.type); + tVariantCreate(&V, &Y); + + tVariantList* A = tVariantListAppend(NULL, &V, -1); + A = tVariantListAppend(A, &Z, -1); + + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_SET); + setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET); +} + +////////////////////////////////////////kill statement/////////////////////////////////////// +cmd ::= KILL CONNECTION IP(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &X);} +cmd ::= KILL STREAM IP(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &X);} +cmd ::= KILL QUERY IP(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &X);} + +%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED + DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD + LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL + COUNT SUM AVG MIN MAX FIRST LAST TOP BOTTOM STDDEV PERCENTILE APERCENTILE LEASTSQUARES HISTOGRAM DIFF + SPREAD WAVG INTERP LAST_ROW NOW IP SEMI NONE PREV LINEAR IMPORT METRIC TBNAME JOIN METRICS STABLE. \ No newline at end of file diff --git a/src/inc/taos.h b/src/inc/taos.h new file mode 100644 index 000000000000..a9df90609881 --- /dev/null +++ b/src/inc/taos.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. 
If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_TAOS_H
+#define TDENGINE_TAOS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define TAOS void
+#define TAOS_ROW void **
+#define TAOS_RES void
+#define TAOS_SUB void
+#define TAOS_STREAM void
+
+#define TSDB_DATA_TYPE_NULL 0
+#define TSDB_DATA_TYPE_BOOL 1 // 1 bytes
+#define TSDB_DATA_TYPE_TINYINT 2 // 1 byte
+#define TSDB_DATA_TYPE_SMALLINT 3 // 2 bytes
+#define TSDB_DATA_TYPE_INT 4 // 4 bytes
+#define TSDB_DATA_TYPE_BIGINT 5 // 8 bytes
+#define TSDB_DATA_TYPE_FLOAT 6 // 4 bytes
+#define TSDB_DATA_TYPE_DOUBLE 7 // 8 bytes
+#define TSDB_DATA_TYPE_BINARY 8 // string
+#define TSDB_DATA_TYPE_TIMESTAMP 9 // 8 bytes
+#define TSDB_DATA_TYPE_NCHAR 10 // multibyte string
+
+typedef enum {
+  TSDB_OPTION_LOCALE,
+  TSDB_OPTION_CHARSET,
+  TSDB_OPTION_TIMEZONE,
+  TSDB_OPTION_CONFIGDIR,
+  TSDB_OPTION_SHELL_ACTIVITY_TIMER,
+  TSDB_MAX_OPTIONS
+} TSDB_OPTION;
+
+typedef struct taosField{
+  char name[64];
+  short bytes;
+  char type;
+} TAOS_FIELD;
+
+void taos_init();
+int taos_options(TSDB_OPTION option, const void *arg, ...);
+TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port);
+void taos_close(TAOS *taos);
+int taos_query(TAOS *taos, char *sqlstr);
+TAOS_RES *taos_use_result(TAOS *taos);
+TAOS_ROW taos_fetch_row(TAOS_RES *res);
+int taos_result_precision(TAOS_RES *res); // get the time precision of result
+void taos_free_result(TAOS_RES *res);
+int taos_field_count(TAOS *taos);
+int taos_num_fields(TAOS_RES *res);
+int taos_affected_rows(TAOS *taos);
+TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
+int taos_select_db(TAOS *taos, char *db);
+int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
+void taos_stop_query(TAOS_RES *res);
+
+int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
+int taos_validate_sql(TAOS *taos, char *sql);
+
+// TAOS_RES *taos_list_tables(TAOS *mysql, const char *wild);
+// TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild);
+
+char *taos_get_server_info(TAOS *taos);
+char *taos_get_client_info();
+char *taos_errstr(TAOS *taos);
+int taos_errno(TAOS *taos);
+
+void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int code), void *param);
+void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
+void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);
+
+TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds);
+TAOS_ROW taos_consume(TAOS_SUB *tsub);
+void taos_unsubscribe(TAOS_SUB *tsub);
+int taos_subfields_count(TAOS_SUB *tsub);
+TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub);
+
+TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
+                              int64_t stime, void *param, void (*callback)(void *));
+void taos_close_stream(TAOS_STREAM *tstr);
+
+extern char configDir[]; // the path to global configuration
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
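A minimal synchronous query sketch against the client API declared above: the endpoint, credentials, database and table names are placeholder values, the include path for taos.h is assumed, a zero return from taos_query() is taken to mean success, and error handling is trimmed to the essentials.

#include <stdio.h>
#include "taos.h"

int demo_query(void) {
  taos_init();
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "demo", 0);  /* placeholder endpoint and credentials */
  if (conn == NULL) return -1;

  if (taos_query(conn, "select * from weather limit 10") != 0) {
    printf("query failed: %s\n", taos_errstr(conn));
  } else {
    TAOS_RES   *res    = taos_use_result(conn);
    int         nField = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    TAOS_ROW    row;
    char        line[1024];                    /* taos_print_row() formats one row into this buffer */
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, nField);
      printf("%s\n", line);
    }
    taos_free_result(res);
  }
  taos_close(conn);
  return 0;
}

diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
new file mode 100644
index 000000000000..c25664a5229a
--- /dev/null
+++ b/src/inc/taosmsg.h
@@ -0,0 +1,911 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.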
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TAOSMSG_H +#define TDENGINE_TAOSMSG_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tsdb.h" + +#define TSDB_CODE_SUCCESS 0 +#define TSDB_CODE_ACTION_IN_PROGRESS 1 + +#define TSDB_CODE_LAST_SESSION_NOT_FINISHED 5 +#define TSDB_CODE_INVALID_SESSION_ID 6 +#define TSDB_CODE_INVALID_TRAN_ID 7 +#define TSDB_CODE_INVALID_MSG_TYPE 8 +#define TSDB_CODE_ALREADY_PROCESSED 9 +#define TSDB_CODE_AUTH_FAILURE 10 +#define TSDB_CODE_WRONG_MSG_SIZE 11 +#define TSDB_CODE_UNEXPECTED_RESPONSE 12 +#define TSDB_CODE_INVALID_RESPONSE_TYPE 13 +#define TSDB_CODE_NO_RESOURCE 14 +#define TSDB_CODE_INVALID_TIME_STAMP 15 +#define TSDB_CODE_MISMATCHED_METER_ID 16 +#define TSDB_CODE_ACTION_TRANS_NOT_FINISHED 17 +#define TSDB_CODE_ACTION_NOT_ONLINE 18 +#define TSDB_CODE_ACTION_SEND_FAILD 19 +#define TSDB_CODE_NOT_ACTIVE_SESSION 20 +#define TSDB_CODE_INSERT_FAILED 21 +#define TSDB_CODE_APP_ERROR 22 +#define TSDB_CODE_INVALID_IE 23 +#define TSDB_CODE_INVALID_VALUE 24 +#define TSDB_CODE_REDIRECT 25 +#define TSDB_CODE_ALREADY_THERE 26 +#define TSDB_CODE_INVALID_METER_ID 27 +#define TSDB_CODE_INVALID_SQL 28 +#define TSDB_CODE_NETWORK_UNAVAIL 29 +#define TSDB_CODE_INVALID_MSG_LEN 30 +#define TSDB_CODE_INVALID_DB 31 +#define TSDB_CODE_INVALID_TABLE 32 +#define TSDB_CODE_DB_ALREADY_EXIST 33 +#define TSDB_CODE_TABLE_ALREADY_EXIST 34 +#define TSDB_CODE_INVALID_USER 35 +#define TSDB_CODE_INVALID_ACCT 36 +#define TSDB_CODE_INVALID_PASS 37 +#define TSDB_CODE_DB_NOT_SELECTED 38 +#define TSDB_CODE_MEMORY_CORRUPTED 39 +#define TSDB_CODE_USER_ALREADY_EXIST 40 +#define TSDB_CODE_NO_RIGHTS 41 +#define TSDB_CODE_DISCONNECTED 42 +#define TSDB_CODE_NO_MASTER 43 +#define TSDB_CODE_NOT_CONFIGURED 44 +#define TSDB_CODE_INVALID_OPTION 45 +#define TSDB_CODE_NODE_OFFLINE 46 +#define TSDB_CODE_SYNC_REQUIRED 47 +#define TSDB_CODE_NO_ENOUGH_PNODES 48 +#define TSDB_CODE_UNSYNCED 49 +#define TSDB_CODE_TOO_SLOW 50 +#define TSDB_CODE_OTHERS 51 +#define TSDB_CODE_NO_REMOVE_MASTER 52 +#define TSDB_CODE_WRONG_SCHEMA 53 +#define TSDB_CODE_NO_RESULT 54 +#define TSDB_CODE_TOO_MANY_USERS 55 +#define TSDB_CODE_TOO_MANY_DATABSES 56 +#define TSDB_CODE_TOO_MANY_TABLES 57 +#define TSDB_CODE_TOO_MANY_DNODES 58 +#define TSDB_CODE_TOO_MANY_ACCTS 59 +#define TSDB_CODE_ACCT_ALREADY_EXIST 60 +#define TSDB_CODE_DNODE_ALREADY_EXIST 61 +#define TSDB_CODE_SDB_ERROR 62 +#define TSDB_CODE_METRICMETA_EXPIRED 63 // local cached metric-meta expired causes error in metric query +#define TSDB_CODE_NOT_READY 64 // peer is not ready to process data +#define TSDB_CODE_MAX_SESSIONS 65 // too many sessions +#define TSDB_CODE_MAX_CONNECTIONS 66 // too many connections +#define TSDB_CODE_SESSION_ALREADY_EXIST 67 +#define TSDB_CODE_NO_QSUMMARY 68 +#define TSDB_CODE_SERV_OUT_OF_MEMORY 69 +#define TSDB_CODE_INVALID_QHANDLE 70 +#define TSDB_CODE_RELATED_TABLES_EXIST 71 +#define TSDB_CODE_MONITOR_DB_FORBEIDDEN 72 +#define TSDB_CODE_VG_COMMITLOG_INIT_FAILED 73 +#define TSDB_CODE_VG_INIT_FAILED 74 +#define TSDB_CODE_DATA_ALREADY_IMPORTED 75 +#define TSDB_CODE_OPS_NOT_SUPPORT 76 +#define TSDB_CODE_INVALID_QUERY_ID 77 +#define TSDB_CODE_INVALID_STREAM_ID 78 +#define TSDB_CODE_INVALID_CONNECTION 79 +#define TSDB_CODE_ACTION_NOT_BALANCED 80 +#define 
TSDB_CODE_CLI_OUT_OF_MEMORY 81 +#define TSDB_CODE_DATA_OVERFLOW 82 +#define TSDB_CODE_QUERY_CANCELLED 83 +#define TSDB_CODE_GRANT_TIMESERIES_LIMITED 84 +#define TSDB_CODE_GRANT_EXPIRED 85 +#define TSDB_CODE_CLI_NO_DISKSPACE 86 +#define TSDB_CODE_FILE_CORRUPTED 87 +#define TSDB_CODE_INVALID_CLIENT_VERSION 88 +#define TSDB_CODE_INVALID_ACCT_PARAMETER 89 +#define TSDB_CODE_NOT_ENOUGH_TIME_SERIES 90 +#define TSDB_CODE_NO_WRITE_ACCESS 91 +#define TSDB_CODE_NO_READ_ACCESS 92 +#define TSDB_CODE_GRANT_DB_LIMITED 93 +#define TSDB_CODE_GRANT_USER_LIMITED 94 +#define TSDB_CODE_GRANT_CONN_LIMITED 95 +#define TSDB_CODE_GRANT_STREAM_LIMITED 96 +#define TSDB_CODE_GRANT_SPEED_LIMITED 97 +#define TSDB_CODE_GRANT_STORAGE_LIMITED 98 +#define TSDB_CODE_GRANT_QUERYTIME_LIMITED 99 +#define TSDB_CODE_GRANT_ACCT_LIMITED 100 +#define TSDB_CODE_GRANT_DNODE_LIMITED 101 +#define TSDB_CODE_GRANT_CPU_LIMITED 102 +#define TSDB_CODE_SESSION_NOT_READY 103 // table NOT in ready state +#define TSDB_CODE_BATCH_SIZE_TOO_BIG 104 +#define TSDB_CODE_TIMESTAMP_OUT_OF_RANGE 105 +#define TSDB_CODE_INVALID_QUERY_MSG 106 // failed to validate the sql expression msg by vnode + +// message type +#define TSDB_MSG_TYPE_REG 1 +#define TSDB_MSG_TYPE_REG_RSP 2 +#define TSDB_MSG_TYPE_SUBMIT 3 +#define TSDB_MSG_TYPE_SUBMIT_RSP 4 +#define TSDB_MSG_TYPE_NWCHANGE 5 +#define TSDB_MSG_TYPE_NWCHANGE_RSP 6 +#define TSDB_MSG_TYPE_DELIVER 7 +#define TSDB_MSG_TYPE_DELIVER_RSP 8 + +#define TSDB_MSG_TYPE_CREATE 9 +#define TSDB_MSG_TYPE_CREATE_RSP 10 +#define TSDB_MSG_TYPE_REMOVE 11 +#define TSDB_MSG_TYPE_REMOVE_RSP 12 +#define TSDB_MSG_TYPE_VPEERS 13 +#define TSDB_MSG_TYPE_VPEERS_RSP 14 +#define TSDB_MSG_TYPE_FREE_VNODE 15 +#define TSDB_MSG_TYPE_FREE_VNODE_RSP 16 +#define TSDB_MSG_TYPE_VPEER_CFG 17 +#define TSDB_MSG_TYPE_VPEER_CFG_RSP 18 +#define TSDB_MSG_TYPE_METER_CFG 19 +#define TSDB_MSG_TYPE_METER_CFG_RSP 20 + +#define TSDB_MSG_TYPE_VPEER_FWD 21 +#define TSDB_MSG_TYPE_VPEER_FWD_RSP 22 +#define TSDB_MSG_TYPE_SYNC 23 +#define TSDB_MSG_TYPE_SYNC_RSP 24 + +#define TSDB_MSG_TYPE_INSERT 25 +#define TSDB_MSG_TYPE_INSERT_RSP 26 +#define TSDB_MSG_TYPE_QUERY 27 +#define TSDB_MSG_TYPE_QUERY_RSP 28 +#define TSDB_MSG_TYPE_RETRIEVE 29 +#define TSDB_MSG_TYPE_RETRIEVE_RSP 30 + +#define TSDB_MSG_TYPE_CONNECT 31 +#define TSDB_MSG_TYPE_CONNECT_RSP 32 +#define TSDB_MSG_TYPE_CREATE_ACCT 33 +#define TSDB_MSG_TYPE_CREATE_ACCT_RSP 34 +#define TSDB_MSG_TYPE_CREATE_USER 35 +#define TSDB_MSG_TYPE_CREATE_USER_RSP 36 +#define TSDB_MSG_TYPE_DROP_ACCT 37 +#define TSDB_MSG_TYPE_DROP_ACCT_RSP 38 +#define TSDB_MSG_TYPE_DROP_USER 39 +#define TSDB_MSG_TYPE_DROP_USER_RSP 40 +#define TSDB_MSG_TYPE_ALTER_USER 41 +#define TSDB_MSG_TYPE_ALTER_USER_RSP 42 +#define TSDB_MSG_TYPE_CREATE_MNODE 43 +#define TSDB_MSG_TYPE_CREATE_MNODE_RSP 44 +#define TSDB_MSG_TYPE_DROP_MNODE 45 +#define TSDB_MSG_TYPE_DROP_MNODE_RSP 46 +#define TSDB_MSG_TYPE_CREATE_PNODE 47 +#define TSDB_MSG_TYPE_CREATE_PNODE_RSP 48 +#define TSDB_MSG_TYPE_DROP_PNODE 49 +#define TSDB_MSG_TYPE_DROP_PNODE_RSP 50 +#define TSDB_MSG_TYPE_CREATE_DB 51 +#define TSDB_MSG_TYPE_CREATE_DB_RSP 52 +#define TSDB_MSG_TYPE_DROP_DB 53 +#define TSDB_MSG_TYPE_DROP_DB_RSP 54 +#define TSDB_MSG_TYPE_USE_DB 55 +#define TSDB_MSG_TYPE_USE_DB_RSP 56 +#define TSDB_MSG_TYPE_CREATE_TABLE 57 +#define TSDB_MSG_TYPE_CREATE_TABLE_RSP 58 +#define TSDB_MSG_TYPE_DROP_TABLE 59 +#define TSDB_MSG_TYPE_DROP_TABLE_RSP 60 +#define TSDB_MSG_TYPE_METERINFO 61 +#define TSDB_MSG_TYPE_METERINFO_RSP 62 +#define TSDB_MSG_TYPE_METRIC_META 63 +#define TSDB_MSG_TYPE_METRIC_META_RSP 
64 +#define TSDB_MSG_TYPE_SHOW 65 +#define TSDB_MSG_TYPE_SHOW_RSP 66 + +#define TSDB_MSG_TYPE_FORWARD 67 +#define TSDB_MSG_TYPE_FORWARD_RSP 68 + +#define TSDB_MSG_TYPE_CFG_PNODE 69 +#define TSDB_MSG_TYPE_CFG_PNODE_RSP 70 +#define TSDB_MSG_TYPE_CFG_MNODE 71 +#define TSDB_MSG_TYPE_CFG_MNODE_RSP 72 + +#define TSDB_MSG_TYPE_KILL_QUERY 73 +#define TSDB_MSG_TYPE_KILL_QUERY_RSP 74 +#define TSDB_MSG_TYPE_KILL_STREAM 75 +#define TSDB_MSG_TYPE_KILL_STREAM_RSP 76 +#define TSDB_MSG_TYPE_KILL_CONNECTION 77 +#define TSDB_MSG_TYPE_KILL_CONNECTION_RSP 78 + +#define TSDB_MSG_TYPE_ALTER_STREAM 79 +#define TSDB_MSG_TYPE_ALTER_STREAM_RSP 80 +#define TSDB_MSG_TYPE_ALTER_TABLE 81 +#define TSDB_MSG_TYPE_ALTER_TABLE_RSP 82 +#define TSDB_MSG_TYPE_ALTER_DB 83 +#define TSDB_MSG_TYPE_ALTER_DB_RSP 84 + +#define TSDB_MSG_TYPE_HEARTBEAT 91 +#define TSDB_MSG_TYPE_HEARTBEAT_RSP 92 +#define TSDB_MSG_TYPE_STATUS 93 +#define TSDB_MSG_TYPE_STATUS_RSP 94 +#define TSDB_MSG_TYPE_GRANT 95 +#define TSDB_MSG_TYPE_GRANT_RSP 96 + +#define TSDB_MSG_TYPE_ALTER_ACCT 97 +#define TSDB_MSG_TYPE_ALTER_ACCT_RSP 98 +#define TSDB_MSG_TYPE_MAX 99 + +// IE type +#define TSDB_IE_TYPE_SEC 1 +#define TSDB_IE_TYPE_META 2 +#define TSDB_IE_TYPE_MGMT_IP 3 +#define TSDB_IE_TYPE_DNODE_CFG 4 +#define TSDB_IE_TYPE_NEW_VERSION 5 +#define TSDB_IE_TYPE_DNODE_EXT 6 +#define TSDB_IE_TYPE_DNODE_STATE 7 + +// mgmt table +enum _mgmt_table { + TSDB_MGMT_TABLE_USER, + TSDB_MGMT_TABLE_DB, + TSDB_MGMT_TABLE_TABLE, + TSDB_MGMT_TABLE_PNODE, + TSDB_MGMT_TABLE_VGROUP, + TSDB_MGMT_TABLE_METRIC, + TSDB_MGMT_TABLE_QUERIES, + TSDB_MGMT_TABLE_STREAMS, + TSDB_MGMT_TABLE_CONNS, + TSDB_MGMT_TABLE_MAX, +}; + +#define TSDB_ALTER_TABLE_ADD_TAG_COLUMN 1 +#define TSDB_ALTER_TABLE_DROP_TAG_COLUMN 2 +#define TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN 3 +#define TSDB_ALTER_TABLE_UPDATE_TAG_VAL 4 + +#define TSDB_ALTER_TABLE_ADD_COLUMN 5 +#define TSDB_ALTER_TABLE_DROP_COLUMN 6 + +#define TSDB_INTERPO_NONE 0 +#define TSDB_INTERPO_NULL 1 +#define TSDB_INTERPO_SET_VALUE 2 +#define TSDB_INTERPO_LINEAR 3 +#define TSDB_INTERPO_PREV 4 + +#define TSDB_ALTER_USER_PASSWD 0x1 +#define TSDB_ALTER_USER_PRIVILEGES 0x2 + +#define TSDB_KILL_MSG_LEN 30 + +#define TSDB_METER_METRIC 0 // metric +#define TSDB_METER_MTABLE 1 // table created from metric +#define TSDB_METER_OTABLE 2 // ordinary table +#define TSDB_METER_STABLE 3 // table created from stream computing +#define TSDB_MAX_METER_TYPES 4 + +#define TSDB_VN_READ_ACCCESS ((char)0x1) +#define TSDB_VN_WRITE_ACCCESS ((char)0x2) +#define TSDB_VN_ALL_ACCCESS (TSDB_VN_READ_ACCCESS|TSDB_VN_WRITE_ACCCESS) + + +extern char *taosMsg[]; +extern char *tsError[]; + +#pragma pack(1) + +typedef struct { + char numOfIps; + uint32_t ip[]; +} SIpList; + +typedef struct { + char numOfIps; + uint32_t ip[TSDB_MAX_MGMT_IPS]; +} SMgmtIpList; + +typedef struct { + char version : 4; + char comp : 4; + char tcp : 2; + char spi : 3; + char encrypt : 3; + uint16_t tranId; + uint32_t uid; // for unique ID inside a client + uint32_t sourceId; + + // internal part + uint32_t destId; + char meterId[TSDB_UNI_LEN]; + short port; // for UDP only + char empty[1]; + char msgType; + int32_t msgLen; + uint8_t content[0]; +} STaosHeader; + +typedef struct { + uint32_t timeStamp; + uint8_t auth[TSDB_AUTH_LEN]; +} STaosDigest; + +typedef struct { + unsigned char code; + char more[]; +} STaosRsp, SMsgReply; + +typedef struct { + uint32_t customerId; + uint32_t osId; + uint32_t appId; + char hwId[TSDB_UNI_LEN]; + char hwVersion[TSDB_VERSION_LEN]; + char osVersion[TSDB_VERSION_LEN]; + char 
appVersion[TSDB_VERSION_LEN]; + char sdkVersion[TSDB_VERSION_LEN]; + char name[TSDB_UNI_LEN]; + char street[TSDB_STREET_LEN]; + char city[TSDB_CITY_LEN]; + char state[TSDB_STATE_LEN]; + char country[TSDB_COUNTRY_LEN]; + uint32_t longitude; + uint32_t latitude; +} SRegMsg; + +typedef struct { + short numOfRows; + char payLoad[]; +} SSubmitMsg; + +typedef struct { + int32_t sid; + int32_t sversion; + uint64_t uid; + short numOfRows; + char payLoad[]; +} SShellSubmitBlock; + +typedef struct { + short import; + short vnode; + int32_t numOfSid; /* total number of sid */ + char blks[]; /* numOfSid blocks, each blocks for one meter */ +} SShellSubmitMsg; + +typedef struct SSchema { + char type; + char name[TSDB_COL_NAME_LEN]; + short colId; + short bytes; +} SSchema; + +typedef struct SMColumn { + char type; + short colId; + short bytes; +} SMColumn; + +typedef struct { + short vnode; + int32_t sid; + uint64_t uid; + char spi; + char encrypt; + char meterId[TSDB_METER_ID_LEN]; + char secret[TSDB_KEY_LEN]; + char cipheringKey[TSDB_KEY_LEN]; + uint64_t timeStamp; + uint64_t lastCreate; + short numOfColumns; + short sqlLen; // SQL string is after schema + char reserved[16]; + int32_t sversion; + SMColumn schema[]; +} SCreateMsg; + +typedef struct { + char db[TSDB_DB_NAME_LEN]; + short ignoreNotExists; +} SDropDbMsg, SUseDbMsg; + +typedef struct { char user[TSDB_USER_LEN]; } SDropUserMsg, SDropAcctMsg; + +typedef struct { char db[TSDB_DB_NAME_LEN]; } SShowTableMsg; + +typedef struct { + char meterId[TSDB_METER_ID_LEN]; + char igExists; + + short numOfTags; + + short numOfColumns; + short sqlLen; // the length of SQL, it starts after schema , sql is a + // null-terminated string + char reserved[16]; + + SSchema schema[]; +} SCreateTableMsg; + +typedef struct { + char meterId[TSDB_METER_ID_LEN]; + char igNotExists; +} SDropTableMsg; + +typedef struct { + char meterId[TSDB_METER_ID_LEN]; + short type; /* operation type */ + char tagVal[TSDB_MAX_BYTES_PER_ROW]; + short numOfCols; /* number of schema */ + SSchema schema[]; +} SAlterTableMsg; + +typedef struct { char db[TSDB_METER_ID_LEN]; } SConnectMsg; + +typedef struct { + int32_t maxUsers; + int32_t maxDbs; + int32_t maxTimeSeries; + int32_t maxConnections; + int32_t maxStreams; + int32_t maxPointsPerSecond; + int64_t maxStorage; // In unit of GB + int64_t maxQueryTime; // In unit of hour + int64_t maxInbound; + int64_t maxOutbound; + char accessState; // Configured only by command +} SAcctCfg; + +typedef struct { + char user[TSDB_USER_LEN]; + char pass[TSDB_KEY_LEN]; + SAcctCfg cfg; +} SCreateAcctMsg, SAlterAcctMsg; + +typedef struct { + char user[TSDB_USER_LEN]; + char pass[TSDB_KEY_LEN]; + char privilege; + char flag; +} SCreateUserMsg, SAlterUserMsg; + +typedef struct { char db[TSDB_METER_ID_LEN]; } SMgmtHead; + +typedef struct { + char acctId[TSDB_ACCT_LEN]; + char version[TSDB_VERSION_LEN]; + char writeAuth; + char superAuth; +} SConnectRsp; + +typedef struct { + short vnode; + int32_t sid; + uint64_t uid; + char meterId[TSDB_METER_ID_LEN]; +} SRemoveMeterMsg; + +typedef struct { short vnode; } SFreeVnodeMsg; + +typedef struct SColIndex { + int16_t colId; + int16_t colIdx; + bool isTag; +} SColIndex; + +typedef struct SColIndexEx { + int16_t colId; + /* + * colIdx is the index of column in latest schema of table + * it is available in the client side. Also used to determine + * whether current meter schema is up-to-date. 
+ * + * colIdxInBuf is used to denote the index of the column in pQuery->colList; + * this value is invalid on the client side, as well as in the cache block of the + * vnode. + */ + int16_t colIdx; + int16_t colIdxInBuf; + bool isTag; +} SColIndexEx; + +/* sql function msg, describing to the vnode the sql function + * operations in the select clause */ +typedef struct SSqlFuncExprMsg { + int16_t functionId; + int16_t numOfParams; + + SColIndexEx colInfo; + struct ArgElem { + int16_t argType; + int16_t argBytes; + union { + double d; + int64_t i64; + char * pz; + } argValue; + } arg[3]; +} SSqlFuncExprMsg; + +typedef struct SSqlBinaryExprInfo { + struct tSQLBinaryExpr *pBinExpr; /* for binary expression */ + int32_t numOfCols; /* number of columns involved in the binary expression */ + SColIndexEx * pReqColumns; /* source column list */ +} SSqlBinaryExprInfo; + +typedef struct SSqlFunctionExpr { + SSqlFuncExprMsg pBase; + SSqlBinaryExprInfo pBinExprInfo; + int16_t resBytes; + int16_t resType; +} SSqlFunctionExpr; + +typedef struct SColumnFilterMsg { + /* for the client-side struct, we only need the column id; type and bytes are not + * necessary. + * But for data on the vnode side, we need all of the following information. + * */ + int16_t colId; + int16_t type; + int16_t bytes; + + int16_t filterOn; /* denotes if the filter is active */ + int16_t lowerRelOptr; + int16_t upperRelOptr; + int16_t filterOnBinary; /* denotes if the current column is binary */ + + /* double/int64_t/float/int share this memory */ + union { + struct { + int64_t lowerBndi; + int64_t upperBndi; + }; + struct { + double lowerBndd; + double upperBndd; + }; + struct { + int64_t pz; + int64_t len; + }; + }; +} SColumnFilterMsg; + +/* + * enables the vnode to understand how to group several tables with different tags + */ +typedef struct SMeterSidExtInfo { + // union {int32_t sid; void* pObj;}; + int32_t sid; + void * pObj; + char tags[]; +} SMeterSidExtInfo; + +/* + * outputCols is equal to or larger than numOfCols + * e.g., select min(colName), max(colName), avg(colName) from meter_name + * outputCols will be 3 while numOfCols is 1.
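+ * (in the SQueryMeterMsg struct that follows, these counts correspond to the numOfCols and + * numOfOutputCols fields: for the example above, numOfCols is 1 and numOfOutputCols is 3)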
+ */ +typedef struct { + int16_t vnode; + int32_t numOfSids; + uint64_t pSidExtInfo; // meter id & tag info ptr, in windows pointer may + // occupy only 4bytes + + uint64_t uid; + char meterId[TSDB_METER_ID_LEN]; + TSKEY skey; + TSKEY ekey; + int32_t num; + + int16_t order; + int16_t orderColId; + + int16_t numOfCols; // the number of columns will be load from vnode + char intervalTimeUnit; // time interval type, for revisement of interval(1d) + + int64_t nAggTimeInterval; // time interval for aggregation, in million second + uint64_t pTagSchema; // tag schema, used to parse tag information in pSidExtInfo + + int16_t numOfTagsCols; // required number of tags + int16_t tagLength; // tag length in current query + + int16_t numOfGroupbyCols; // num of group by columns + int16_t orderByIdx; + int16_t orderType; // used in group by xx order by xxx + uint64_t groupbyTagIds; + + int64_t limit; + int64_t offset; + + int16_t metricQuery; // denote another query process + int16_t numOfOutputCols; // final output columns numbers + + int16_t interpoType; // interpolate type + uint64_t defaultVal; // default value array list + + int32_t colNameLen; + int64_t colNameList; + + int64_t pSqlFuncExprs; + SColumnFilterMsg colList[]; +} SQueryMeterMsg; + +typedef struct { + char code; + uint64_t qhandle; +} SQueryMeterRsp; + +typedef struct { + TSKEY skey; + TSKEY ekey; + int32_t num; + short order; + short numOfCols; + short colList[]; +} SQueryMsg; + +typedef struct { + uint64_t qhandle; + char free; +} SRetrieveMeterMsg; + +typedef struct { + int32_t numOfRows; + int16_t precision; + int64_t offset; // updated offset value for multi-vnode projection query + int64_t useconds; + char data[]; +} SRetrieveMeterRsp; + +typedef struct { + uint32_t vnode; + uint32_t vgId; + uint8_t status; + uint8_t dropStatus; + uint8_t accessState; + int64_t totalStorage; + int64_t compStorage; + int64_t pointsWritten; + char reserved[16]; +} SVnodeLoad; + +typedef struct { + uint32_t vnode; + char accessState; +} SVnodeAccess; + +// NOTE: sizeof(SVnodeCfg) < TSDB_FILE_HEADER_LEN/4 +typedef struct { + char acct[TSDB_USER_LEN]; + char db[TSDB_DB_NAME_LEN]; + uint32_t vgId; + int32_t maxSessions; + int32_t cacheBlockSize; + union { + int32_t totalBlocks; + float fraction; + } cacheNumOfBlocks; + int32_t daysPerFile; + + int32_t daysToKeep1; + int32_t daysToKeep2; + int32_t daysToKeep; + + int32_t commitTime; + int32_t rowsInFileBlock; + int16_t blocksPerMeter; + char compression; + char commitLog; + char replications; + + char repStrategy; + char loadLatest; // load into mem or not + char precision; // time resoluation + + char reserved[16]; +} SVnodeCfg, SCreateDbMsg, SDbCfg, SAlterDbMsg; + +// IMPORTANT: sizeof(SVnodeStatisticInfo) should not exceed +// TSDB_FILE_HEADER_LEN/4 - TSDB_FILE_HEADER_VERSION_SIZE +typedef struct { + int64_t pointsWritten; // In unit of points + int64_t totalStorage; // In unit of bytes + int64_t compStorage; // In unit of bytes + int64_t queryTime; // In unit of second ?? 
+ char reserved[64]; +} SVnodeStatisticInfo; + +typedef struct { + uint32_t version; + uint32_t publicIp; + uint32_t lastReboot; // time stamp for last reboot + uint16_t numOfCores; + uint8_t alternativeRole; + uint8_t reserve; + float memoryAvailable; // MB + float diskAvailable; // GB + uint32_t openVnodes; + char reserved[16]; + SVnodeLoad load[]; +} SStatusMsg; + +typedef struct { + uint32_t moduleStatus; + uint32_t createdTime; + uint32_t numOfVnodes; + uint32_t reserved; +} SDnodeState; + +// internal message +typedef struct { + uint32_t destId; + char meterId[TSDB_UNI_LEN]; + char empty[3]; + char msgType; + int32_t msgLen; + uint8_t content[0]; +} SIntMsg; + +typedef struct { + char spi; + char encrypt; + char secret[TSDB_KEY_LEN]; // key is changed if updated + char cipheringKey[TSDB_KEY_LEN]; +} SSecIe; + +typedef struct { + uint32_t ip; + uint32_t vnode; +} SVPeerDesc; + +typedef struct { + int32_t vnode; + SVnodeCfg cfg; + SVPeerDesc vpeerDesc[]; +} SVPeersMsg; + +typedef struct { + char meterId[TSDB_METER_ID_LEN]; + short createFlag; + char tags[]; +} SMeterInfoMsg; + +typedef struct { + char meterId[TSDB_METER_ID_LEN]; + + int16_t numOfGroupbyCols; // num of group by columns + int16_t orderIndex; + int16_t orderType; // used in group by xx order by xxx + uint64_t groupbyTagIds; + + int16_t tagCols[TSDB_MAX_TAGS + 1]; // required tag columns, plus one is for table name + int16_t numOfTags; // required number of tags + + int64_t limit; + int64_t offset; + + int32_t condLength; + int16_t type; // denotes if it has the meter id pools + char tags[]; +} SMetricMetaMsg; + +typedef struct { + SVPeerDesc vpeerDesc[TSDB_VNODES_SUPPORT]; + int16_t index; // used locally + int32_t numOfSids; + int32_t pSidExtInfoList[]; // offset value of SMeterSidExtInfo +} SVnodeSidList; + +typedef struct { + int32_t numOfMeters; + int32_t numOfVnodes; + uint16_t tagLen; /* tag value length */ + int32_t list[]; /* offset of SVnodeSidList, compared to the SMetricMeta struct */ +} SMetricMeta; + +typedef struct SMeterMeta { + int16_t numOfTags; + int16_t precision; + int16_t numOfColumns; + + int32_t sversion; + uint64_t pSchema; + SVPeerDesc vpeerDesc[TSDB_VNODES_SUPPORT]; + + int32_t sid; + int32_t vgid; + + uint64_t uid; + int16_t meterType; + int16_t index; // used locally + int32_t rowSize; // used locally, calculated in client + uint64_t tags; +} SMeterMeta; + +typedef struct { + char name[TSDB_METER_ID_LEN]; + char data[TSDB_MAX_TAGS_LEN]; +} STagData; + +/* + * sql: show tables like '%a_%' + * payload is the query condition, e.g., '%a_%' + * payloadLen is the length of payload + */ +typedef struct { + char type; + uint16_t payloadLen; + char payload[]; +} SShowMsg; + +typedef struct { char ip[20]; } SCreateMnodeMsg, SDropMnodeMsg, SCreateDnodeMsg, SDropDnodeMsg; + +typedef struct { + uint64_t qhandle; + SMeterMeta meterMeta; +} SShowRspMsg; + +typedef struct { + int32_t vnode; + int32_t sid; +} SMeterCfgMsg; + +typedef struct { int32_t vnode; } SVpeerCfgMsg; + +typedef struct { + char ip[20]; + char config[60]; +} SCfgMsg; + +typedef struct { + uint32_t queryId; + uint32_t streamId; + char killConnection; + SIpList ipList; +} SHeartBeatRsp; + +typedef struct { + char sql[TSDB_SHOW_SQL_LEN]; + uint32_t queryId; + int64_t useconds; + int64_t stime; +} SQDesc; + +typedef struct { + char sql[TSDB_SHOW_SQL_LEN]; + uint32_t streamId; + int64_t num; // number of computing/cycles + int64_t useconds; + int64_t ctime; + int64_t stime; + int64_t slidingTime; + int64_t interval; +} SSDesc; + +typedef struct 
{ + int32_t numOfQueries; + SQDesc qdesc[]; +} SQList; + +typedef struct { + int32_t numOfStreams; + SSDesc sdesc[]; +} SSList; + +typedef struct { + uint64_t handle; + char queryId[TSDB_KILL_MSG_LEN]; +} SKillQuery, SKillStream, SKillConnection; + +typedef struct { + short vnode; + int32_t sid; + uint64_t uid; + uint64_t stime; // stream starting time + char status; +} SAlterStreamMsg; + +#pragma pack() + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/tast.h b/src/inc/tast.h new file mode 100644 index 000000000000..37b7765ee2d4 --- /dev/null +++ b/src/inc/tast.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TAST_H +#define TDENGINE_TAST_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +#include "tsql.h" + +struct tSQLBinaryExpr; +struct SSchema; +struct tSkipList; +struct tSkipListNode; + +enum { + TSQL_NODE_EXPR = 0x1, + TSQL_NODE_COL = 0x2, + TSQL_NODE_VALUE = 0x4, +}; + +typedef struct tSQLSyntaxNode { + uint8_t nodeType; + int16_t colId; // for schema, the id of column + union { + struct tSQLBinaryExpr *pExpr; + struct SSchema * pSchema; + tVariant * pVal; + }; +} tSQLSyntaxNode; + +typedef struct tSQLBinaryExpr { + uint8_t nSQLBinaryOptr; + uint8_t filterOnPrimaryKey; // 0: do not contain primary filter, 1: contain + // primary key + + tSQLSyntaxNode *pLeft; + tSQLSyntaxNode *pRight; +} tSQLBinaryExpr; + +#define TAST_NODE_TYPE_INDEX_ENTRY 0 +#define TAST_NODE_TYPE_METER_PTR 1 + +typedef struct tQueryResultset { + void ** pRes; + int64_t num; + int32_t nodeType; +} tQueryResultset; + +typedef struct tQueryInfo { + int32_t offset; // offset value in tags + int32_t colIdx; // index of column in schema + struct SSchema *pSchema; // schema of tags + tVariant q; // queries cond + uint8_t optr; + __compar_fn_t comparator; +} tQueryInfo; + +void tSQLBinaryExprFromString(tSQLBinaryExpr **pExpr, struct SSchema *pSchema, int32_t numOfCols, char *src, + int32_t len); + +void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len); + +void tSQLBinaryExprDestroy(tSQLBinaryExpr **pExprs); + +void tSQLBinaryExprTraverse(tSQLBinaryExpr *pExprs, struct tSkipList *pSkipList, struct SSchema *pSchema, + int32_t numOfCols, bool (*fp)(struct tSkipListNode *, void *), tQueryResultset *result); + +void tSQLBinaryExprCalcTraverse(tSQLBinaryExpr *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order, + char *(*cb)(void *, char *, int32_t)); + +void tSQLBinaryExprTrv(tSQLBinaryExpr *pExprs, int32_t *val, int16_t *ids); + +bool tSQLElemFilterCallback(struct tSkipListNode *pNode, void *param); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TAST_H diff --git a/src/inc/tcache.h b/src/inc/tcache.h new file mode 100644 index 000000000000..805e5a45af38 --- /dev/null +++ b/src/inc/tcache.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TCACHE_H +#define TDENGINE_TCACHE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * + * @param maxSessions maximum slots available for hash elements + * @param tmrCtrl timer ctrl + * @param refreshTimeInSeconds refresh operation interval time; also the maximum survival time once an element + * has expired and is no longer referenced by other objects + * @return + */ +void *taosInitDataCache(int maxSessions, void *tmrCtrl, int64_t refreshTimeInSeconds); + +/** + * add data into the cache + * + * @param handle cache object + * @param key key + * @param pData cached data + * @param dataSize data size + * @param keepTimeInSeconds survival time in seconds + * @return cached element + */ +void *taosAddDataIntoCache(void *handle, char *key, char *pData, int dataSize, int keepTimeInSeconds); + +/** + * remove data from the cache. The data will not be removed immediately; + * if it is referenced by another object, it will remain in the cache + * @param handle cache object + * @param data not the key, but the actual referenced data + * @param isForce force mode: reduce the ref count and move the data into + * pTrash + */ +void taosRemoveDataFromCache(void *handle, void **data, bool isForce); + +/** + * update data in the cache + * @param handle hash object handle (pointer) + * @param key key for hash + * @param pData actual data + * @param size length of data + * @param duration survival time of this object in the cache + * @return new referenced data + */ +void *taosUpdateDataFromCache(void *handle, char *key, char *pData, int size, int duration); + +/** + * get data from the cache + * @param handle cache object + * @param key key + * @return cached data or NULL + */ +void *taosGetDataFromCache(void *handle, char *key); + +/** + * release all allocated memory and destroy the cache object + * + * @param handle + */ +void taosCleanUpDataCache(void *handle); + +/** + * move all data nodes into the trash; clear nodes in the trash if they are not referenced by any client + * @param handle + */ +void taosClearDataCache(void *handle); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TCACHE_H diff --git a/src/inc/tchecksum.h b/src/inc/tchecksum.h new file mode 100644 index 000000000000..493c993c2554 --- /dev/null +++ b/src/inc/tchecksum.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see .
+ */ + +#ifndef TDENGINE_TCHECKSUM_H +#define TDENGINE_TCHECKSUM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcrc32c.h" +#include "tutil.h" + +typedef uint32_t TSCKSUM; + +static FORCE_INLINE TSCKSUM taosCalcChecksum(TSCKSUM csi, const uint8_t *stream, uint32_t ssize) { + assert(ssize >= 0 && stream != NULL); + + return (*crc32c)(csi, stream, (size_t)ssize); +} + +static FORCE_INLINE int taosCalcChecksumAppend(TSCKSUM csi, uint8_t *stream, uint32_t ssize) { + if (ssize < sizeof(TSCKSUM)) return -1; + + *((TSCKSUM *)(stream + ssize - sizeof(TSCKSUM))) = (*crc32c)(csi, stream, (size_t)(ssize - sizeof(TSCKSUM))); + + return 0; +} + +static FORCE_INLINE int taosCheckChecksum(const uint8_t *stream, uint32_t ssize, TSCKSUM checksum) { + if (ssize < 0) return 0; + return (checksum == (*crc32c)(0, stream, (size_t)ssize)); +} + +static FORCE_INLINE int taosCheckChecksumWhole(const uint8_t *stream, uint32_t ssize) { + if (ssize < sizeof(TSCKSUM)) return 0; + return *((TSCKSUM *)(stream + ssize - sizeof(TSCKSUM))) == (*crc32c)(0, stream, (size_t)(ssize - sizeof(TSCKSUM))); +} + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TCHECKSUM_H diff --git a/src/inc/tcrc32c.h b/src/inc/tcrc32c.h new file mode 100644 index 000000000000..78e52e226fc2 --- /dev/null +++ b/src/inc/tcrc32c.h @@ -0,0 +1,44 @@ +/* + Copyright (c) 2013 - 2014, 2016 Mark Adler, Robert Vazan, Max Vysokikh + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + */ + +#ifndef TDENGINE_TCRC32C_H +#define TDENGINE_TCRC32C_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef const uint8_t *crc_stream; + +extern uint32_t (*crc32c)(uint32_t crci, crc_stream bytes, size_t len); + +uint32_t crc32c_sf(uint32_t crci, crc_stream input, size_t length); + +uint32_t crc32c_hw(uint32_t crc, crc_stream buf, size_t len); + +void taosResolveCRC(); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TCRC32C_H diff --git a/src/inc/textbuffer.h b/src/inc/textbuffer.h new file mode 100644 index 000000000000..72a985f4be2f --- /dev/null +++ b/src/inc/textbuffer.h @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#ifndef TDENGINE_TEXTBUFFER_H +#define TDENGINE_TEXTBUFFER_H + +#ifdef __cplusplus +extern "C" { +#endif + +// TODO REFACTOR + +#include +#include +#include + +#include "tutil.h" + +#define DEFAULT_PAGE_SIZE 16384 // 16k, larger than the SHistoInfo +#define MIN_BUFFER_SIZE (1 << 19) +#define MAX_TMPFILE_PATH_LENGTH 512 +#define INITIAL_ALLOCATION_BUFFER_SIZE 64 + +// forward declare +struct tTagSchema; + +typedef enum EXT_BUFFER_FLUSH_MODEL { + /* + * all data that has been flushed to disk belongs to the same group, + * which means all data on disk is sorted, or order does not matter in this case + */ + SINGLE_APPEND_MODEL, + + /* + * each flush operation to disk is completely independent of any other flush operation. + * we simply merge several sets of data into one file, to reduce the number of flat files + * on disk. So in this case, we need to keep the flush-out information in the tFlushoutInfo + * structure. + */ + MULTIPLE_APPEND_MODEL, +} EXT_BUFFER_FLUSH_MODEL; + +typedef struct tFlushoutInfo { + uint32_t startPageId; + uint32_t numOfPages; +} tFlushoutInfo; + +typedef struct tFlushoutData { + uint32_t nAllocSize; + uint32_t nLength; + tFlushoutInfo *pFlushoutInfo; +} tFlushoutData; + +typedef struct tFileMeta { + uint32_t nFileSize; // in pages + uint32_t nPageSize; + uint32_t numOfElemsInFile; + tFlushoutData flushoutData; +} tFileMeta; + +typedef struct tFilePage { + uint64_t numOfElems; + char data[]; +} tFilePage; + +typedef struct tFilePagesItem { + struct tFilePagesItem *pNext; + tFilePage item; +} tFilePagesItem; + +typedef struct tColModel { + int32_t maxCapacity; + int32_t numOfCols; + int16_t * colOffset; + struct SSchema *pFields; +} tColModel; + +typedef struct tOrderIdx { + int32_t numOfOrderedCols; + int16_t pData[]; +} tOrderIdx; + +typedef struct tOrderDescriptor { + union { + struct tTagSchema *pTagSchema; + tColModel * pSchema; + }; + int32_t tsOrder; // timestamp order type if exists + tOrderIdx orderIdx; +} tOrderDescriptor; + +typedef struct tExtMemBuffer { + int32_t nMaxSizeInPages; + + int32_t nElemSize; + int32_t nPageSize; + + int32_t numOfAllElems; + int32_t numOfElemsInBuffer; + int32_t numOfElemsPerPage; + + int16_t numOfPagesInMem; + tFilePagesItem *pHead; + tFilePagesItem *pTail; + + tFileMeta fileMeta; + + char dataFilePath[MAX_TMPFILE_PATH_LENGTH]; + FILE *dataFile; + + tColModel *pColModel; + + EXT_BUFFER_FLUSH_MODEL flushModel; +} tExtMemBuffer; + +void getExtTmpfilePath(const char *fileNamePattern, int64_t serialNumber, int32_t seg, int32_t slot, char *dstPath); + +/* + * create ext-memory buffer + */ +void tExtMemBufferCreate(tExtMemBuffer **pMemBuffer, int32_t numOfBufferSize, int32_t elemSize, + const char *tmpDataFilePath, tColModel *pModel); + +/* + * destroy ext-memory buffer + */ +void tExtMemBufferDestroy(tExtMemBuffer **pMemBuffer); + +/* + * @param pMemBuffer + * @param data input data pointer + * @param numOfRows number of rows in data + * @return number of pages in memory + */ +int16_t tExtMemBufferPut(tExtMemBuffer *pMemBuffer, void *data, int32_t numOfRows); + +/* + * flush all data to disk and release all in-memory buffers + */ +bool tExtMemBufferFlush(tExtMemBuffer *pMemBuffer); + +/* + * remove all data that has been put into the buffer, whether in memory or in the + * ext-buffer (disk) + */ +void tExtMemBufferClear(tExtMemBuffer *pMemBuffer); + +/* + * this function should be removed.
+ * since the flush to disk operation is transparent to client this structure should provide stream operation for data, + * and there is an internal cursor point to the data. + */ +bool tExtMemBufferLoadData(tExtMemBuffer *pMemBuffer, tFilePage *pFilePage, int32_t flushIdx, int32_t pageIdx); + +bool tExtMemBufferIsAllDataInMem(tExtMemBuffer *pMemBuffer); + +tColModel *tColModelCreate(SSchema *field, int32_t numOfCols, int32_t maxCapacity); + +void tColModelDestroy(tColModel *pModel); + +typedef struct SSrcColumnInfo { + int32_t functionId; + int32_t type; +} SSrcColumnInfo; + +/* + * display data in column format model for debug purpose only + */ +void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t maxCount); + +void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32_t maxCount, SSrcColumnInfo *pInfo); + +/* + * compress data into consecutive block without hole in data + */ +void tColModelCompress(tColModel *pModel, tFilePage *inputBuffer, int32_t maxElemsCapacity); + +void tColModelErase(tColModel *pModel, tFilePage *inputBuffer, int32_t maxCapacity, int32_t s, int32_t e); + +tOrderDescriptor *tOrderDesCreate(int32_t *orderColIdx, int32_t numOfOrderCols, tColModel *pModel, int32_t tsOrderType); + +void tOrderDescDestroy(tOrderDescriptor *pDesc); + +void tColModelAppend(tColModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, + int32_t numOfRowsToWrite, int32_t srcCapacity); + +/////////////////////////////////////////////////////////////////////////////////////////////////////// +typedef struct MinMaxEntry { + union { + double dMinVal; + int32_t iMinVal; + int64_t i64MinVal; + }; + union { + double dMaxVal; + int32_t iMaxVal; + int64_t i64MaxVal; + }; +} MinMaxEntry; + +typedef struct tMemBucketSegment { + int32_t numOfSlots; + MinMaxEntry * pBoundingEntries; + tExtMemBuffer **pBuffer; +} tMemBucketSegment; + +typedef struct tMemBucket { + int16_t numOfSegs; + int16_t nTotalSlots; + int16_t nSlotsOfSeg; + int16_t dataType; + + int16_t nElemSize; + int32_t numOfElems; + + int32_t nTotalBufferSize; + int32_t maxElemsCapacity; + + int16_t nPageSize; + int16_t numOfTotalPages; + int16_t numOfAvailPages; /* remain available buffer pages */ + + tMemBucketSegment *pSegs; + tOrderDescriptor * pOrderDesc; + + MinMaxEntry nRange; + + void (*HashFunc)(struct tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); +} tMemBucket; + +typedef int (*__col_compar_fn_t)(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data); + +void tColDataQSort(tOrderDescriptor *, int32_t numOfRows, int32_t start, int32_t end, char *data, int32_t orderType); + +int32_t compare_sa(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data); + +int32_t compare_sd(tOrderDescriptor *, int32_t numOfRows, int32_t idx1, int32_t idx2, char *data); + +int32_t compare_a(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1, int32_t numOfRow2, int32_t s2, + char *data2); + +int32_t compare_d(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1, int32_t numOfRow2, int32_t s2, + char *data2); + +void tMemBucketCreate(tMemBucket **pBucket, int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, + int16_t dataType, tOrderDescriptor *pDesc); + +void tMemBucketDestroy(tMemBucket **pBucket); + +void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows); + +double getPercentile(tMemBucket *pMemBucket, double percent); + +void tBucketIntHash(tMemBucket *pBucket, void *value, 
int16_t *segIdx, int16_t *slotIdx); + +void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx); + +#ifdef __cplusplus +} +#endif + +#endif // TBASE_SORT_H diff --git a/src/inc/tglobalcfg.h b/src/inc/tglobalcfg.h new file mode 100644 index 000000000000..16cb4898d50a --- /dev/null +++ b/src/inc/tglobalcfg.h @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TGLOBALCFG_H +#define TDENGINE_TGLOBALCFG_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include "tsdb.h" + +// system info +extern int64_t tsPageSize; +extern int64_t tsOpenMax; +extern int64_t tsStreamMax; +extern int32_t tsNumOfCores; +extern int32_t tsTotalDiskGB; +extern int32_t tsTotalMemoryMB; +extern int32_t tsVersion; + +extern int tscEmbedded; + +extern int64_t tsMsPerDay[2]; + +extern char configDir[]; +extern char tsDirectory[]; +extern char dataDir[]; +extern char logDir[]; +extern char scriptDir[]; + +extern char tsMasterIp[]; +extern char tsSecondIp[]; +extern short tsMgmtVnodePort; +extern short tsMgmtShellPort; +extern short tsVnodeShellPort; +extern short tsVnodeVnodePort; +extern short tsMgmtMgmtPort; +extern short tsVnodeSyncPort; +extern short tsMgmtSyncPort; + +extern int tsStatusInterval; +extern int tsShellActivityTimer; +extern int tsVnodePeerHBTimer; +extern int tsMgmtPeerHBTimer; +extern int tsMeterMetaKeepTimer; +extern int tsMetricMetaKeepTimer; + +extern float tsNumOfThreadsPerCore; +extern float tsRatioOfQueryThreads; +extern char tsInternalIp[]; +extern int tsNumOfVnodesPerCore; +extern int tsNumOfTotalVnodes; +extern int tsShellsPerVnode; + +extern int tsSessionsPerVnode; +extern int tsAverageCacheBlocks; +extern int tsCacheBlockSize; + +extern int tsRowsInFileBlock; +extern float tsFileBlockMinPercent; + +extern short tsNumOfBlocksPerMeter; +extern int tsCommitTime; // seconds +extern int tsCommitLog; +extern int tsAsyncLog; +extern int tsCompression; +extern int tsDaysPerFile; +extern int tsDaysToKeep; +extern int tsReplications; + +extern int tsNumOfMPeers; +extern int tsMaxShellConns; +extern int tsMaxAccounts; +extern int tsMaxUsers; +extern int tsMaxDbs; +extern int tsMaxTables; +extern int tsMaxDnodes; +extern int tsMaxVGroups; +extern int tsShellActivityTimer; +extern char tsMgmtZone[]; + +extern char tsLocalIp[]; +extern char tsDefaultDB[]; +extern char tsDefaultUser[]; +extern char tsDefaultPass[]; +extern int tsMaxMeterConnections; +extern int tsMaxVnodeConnections; +extern int tsMaxMgmtConnections; + +extern int tsBalanceMonitorInterval; +extern int tsBalanceStartInterval; +extern int tsBalancePolicy; +extern int tsOfflineThreshold; +extern int tsMgmtEqualVnodeNum; + +extern int tsEnableHttpModule; +extern int tsEnableMonitorModule; + +extern int tsTimePrecision; +extern int tsMinSlidingTime; +extern int tsMinIntervalTime; +extern int tsMaxStreamComputDelay; +extern int tsStreamCompStartDelay; +extern int tsStreamCompRetryDelay; + +extern int tsProjectExecInterval; 
+extern int64_t tsMaxRetentWindow; + +extern char tsHttpIp[]; +extern short tsHttpPort; +extern int tsHttpCacheSessions; +extern int tsHttpSessionExpire; +extern int tsHttpMaxThreads; +extern int tsHttpEnableCompress; +extern int tsAdminRowLimit; + +extern char tsMonitorDbName[]; +extern char tsInternalPass[]; +extern int tsMonitorInterval; + +extern int tsNumOfLogLines; +extern int ddebugFlag; +extern int mdebugFlag; +extern int cdebugFlag; +extern int jnidebugFlag; +extern int tmrDebugFlag; +extern int sdbDebugFlag; +extern int httpDebugFlag; +extern int monitorDebugFlag; +extern int uDebugFlag; +extern int taosDebugFlag; +extern int debugFlag; +extern int odbcdebugFlag; +extern int qdebugFlag; + +extern int tsRpcTimer; +extern int tsRpcMaxTime; +extern int tsUdpDelay; +extern char version[]; +extern char compatible_version[]; +extern char gitinfo[]; +extern char buildinfo[]; + +extern char tsTimezone[64]; +extern char tsLocale[64]; +extern char tsCharset[64]; // default encode string + +// +void tsReadGlobalLogConfig(); +bool tsReadGlobalConfig(); +int tsCfgDynamicOptions(char *msg); +void tsPrintGlobalConfig(); +void tsPrintOsInfo(); +void tsSetAllDebugFlag(); +void tsSetTimeZone(); +void tsSetLocale(); +void tsInitGlobalConfig(); + +#define TSDB_CFG_CTYPE_B_CONFIG 1 // can be configured from file +#define TSDB_CFG_CTYPE_B_SHOW 2 // can displayed by "show configs" commands +#define TSDB_CFG_CTYPE_B_LOG 4 // is a log type configuration +#define TSDB_CFG_CTYPE_B_CLIENT 8 // can be displayed in the client log +#define TSDB_CFG_CTYPE_B_OPTION 16 // can be configured by taos_options function + +#define TSDB_CFG_CSTATUS_NONE 0 // not configured +#define TSDB_CFG_CSTATUS_DEFAULT 1 // use system default value +#define TSDB_CFG_CSTATUS_FILE 2 // configured from file +#define TSDB_CFG_CSTATUS_OPTION 3 // configured by taos_options function +#define TSDB_CFG_CSTATUS_ARG 4 // configured by program argument + +enum { + TSDB_CFG_VTYPE_SHORT, + TSDB_CFG_VTYPE_INT, + TSDB_CFG_VTYPE_UINT, + TSDB_CFG_VTYPE_FLOAT, + TSDB_CFG_VTYPE_STRING, + TSDB_CFG_VTYPE_IPSTR, + TSDB_CFG_VTYPE_DIRECTORY, +}; + +enum { + TSDB_CFG_UTYPE_NONE, + TSDB_CFG_UTYPE_PERCENT, + TSDB_CFG_UTYPE_GB, + TSDB_CFG_UTYPE_MB, + TSDB_CFG_UTYPE_Mb, + TSDB_CFG_UTYPE_BYTE, + TSDB_CFG_UTYPE_SECOND, + TSDB_CFG_UTYPE_MS +}; + +typedef struct { + char * option; + void * ptr; + float minValue; + float maxValue; + int8_t cfgType; + int8_t cfgStatus; + int8_t unitType; + int8_t valType; + uint32_t ptrLength; +} SGlobalConfig; + +extern SGlobalConfig *tsGlobalConfig; +extern int tsGlobalConfigNum; +extern char * tsCfgStatusStr[]; +SGlobalConfig *tsGetConfigOption(char *option); + +#define TSDB_CFG_MAX_NUM 110 +#define TSDB_CFG_PRINT_LEN 23 +#define TSDB_CFG_OPTION_LEN 24 +#define TSDB_CFG_VALUE_LEN 41 + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TGLOBALCFG_H diff --git a/src/inc/thash.h b/src/inc/thash.h new file mode 100644 index 000000000000..592db62fe5e1 --- /dev/null +++ b/src/inc/thash.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_THASH_H +#define TDENGINE_THASH_H + +#ifdef __cplusplus +extern "C" { +#endif + +void *taosOpenHash(int maxSessions, int (*fp)(void *, uint64_t)); + +void taosCloseHash(void *handle); + +int taosAddHash(void *handle, uint64_t, uint32_t id); + +void taosDeleteHash(void *handle, uint64_t); + +int32_t taosGetIdFromHash(void *handle, uint64_t); + +int taosHashLong(void *, uint64_t ip); + +uint64_t taosHashUInt64(uint64_t handle); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_THASH_H diff --git a/src/inc/thistogram.h b/src/inc/thistogram.h new file mode 100644 index 000000000000..7e5b1ccac6c9 --- /dev/null +++ b/src/inc/thistogram.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_HISTOGRAM_H +#define TDENGINE_HISTOGRAM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tskiplist.h" + +#define USE_ARRAYLIST + +#define MAX_HISTOGRAM_BIN 500 + +typedef struct SHistBin { + double val; + int64_t num; + +#if !defined(USE_ARRAYLIST) + double delta; + int32_t index; // index in min-heap list +#endif +} SHistBin; + +typedef struct SHeapEntry { + void* pData; + double val; +} SHeapEntry; + +typedef struct SHistogramInfo { + int32_t numOfElems; + int32_t numOfEntries; + int32_t maxEntries; + +#if defined(USE_ARRAYLIST) + SHistBin* elems; +#else + tSkipList* pList; + SLoserTreeInfo* pLoserTree; + int32_t maxIndex; + bool ordered; +#endif + + double min; + double max; +} SHistogramInfo; + +SHistogramInfo* tHistogramCreate(int32_t numOfBins); +SHistogramInfo* tHistogramCreateFrom(void* pBuf, int32_t numOfBins); + +int32_t tHistogramAdd(SHistogramInfo** pHisto, double val); +int64_t tHistogramSum(SHistogramInfo* pHisto, double v); + +double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num); +SHistogramInfo* tHistogramMerge(SHistogramInfo* pHisto1, SHistogramInfo* pHisto2, int32_t numOfEntries); +void tHistogramDestroy(SHistogramInfo** pHisto); + +void tHistogramPrint(SHistogramInfo* pHisto); + +int32_t vnodeHistobinarySearch(SHistBin* pEntry, int32_t len, double val); + +SHeapEntry* tHeapCreate(int32_t numOfEntries); +void tHeapSort(SHeapEntry* pEntry, int32_t len); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_HISTOGRAM_H diff --git a/src/inc/tidpool.h b/src/inc/tidpool.h new file mode 100644 index 000000000000..2b1ac2f2e6b7 --- /dev/null +++ b/src/inc/tidpool.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TIDPOOL_H +#define TDENGINE_TIDPOOL_H + +#ifdef __cplusplus +extern "C" { +#endif + +void *taosInitIdPool(int maxId); + +int taosAllocateId(void *handle); + +void taosFreeId(void *handle, int id); + +void taosIdPoolCleanUp(void *handle); + +int taosIdPoolNumOfUsed(void *handle); + +void taosIdPoolReinit(void *handle); + +void taosIdPoolMarkStatus(void *handle, int id, int status); + +void taosIdPoolSetFreeList(void *handle); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/tinterpolation.h b/src/inc/tinterpolation.h new file mode 100644 index 000000000000..3592664804e7 --- /dev/null +++ b/src/inc/tinterpolation.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TINTERPOLATION_H +#define TDENGINE_TINTERPOLATION_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct SInterpolationInfo { + int64_t startTimestamp; + int32_t order; // order [asc/desc] + int32_t numOfRawDataInRows; // number of points in pQuery->sdata + int32_t rowIdx; // rowIdx in pQuery->sdata + int32_t numOfTotalInterpo; // number of interpolated rows in one round + int32_t numOfCurrentInterpo; // number of interpolated rows in current results + char * prevValues; // previous row of data + char * nextValues; // next row of data + int32_t numOfTags; + char ** pTags; // tags value for current interoplation +} SInterpolationInfo; + +typedef struct SPoint { + int64_t key; + void * val; +} SPoint; + +typedef void (*__interpo_callback_fn_t)(void *param); + +int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t timeRange, char intervalTimeUnit); + +void taosInitInterpoInfo(SInterpolationInfo *pInterpoInfo, int32_t order, int64_t startTimeStamp, int32_t numOfTags, + int32_t rowSize); + +void taosInterpoSetStartInfo(SInterpolationInfo *pInterpoInfo, int32_t numOfRawDataInRows, int32_t type); + +TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int32_t timeInterval, int8_t intervalTimeUnit); + +/** + * + * @param pInterpoInfo + * @param pPrimaryKeyArray + * @param numOfRows + * @param nInterval + * @param ekey + * @param maxNumOfRows + * @return + */ +int32_t taosGetNumOfResultWithInterpo(SInterpolationInfo *pInterpoInfo, int64_t *pPrimaryKeyArray, int32_t numOfRows, + int64_t nInterval, int64_t ekey, int32_t maxNumOfRows); + +int32_t taosGetNumOfResWithoutLimit(SInterpolationInfo *pInterpoInfo, int64_t *pPrimaryKeyArray, + int32_t numOfRawDataInRows, int64_t nInterval, int64_t ekey); +/** + * + * @param pInterpoInfo + * @return + */ +bool taosHasNoneInterpoPoints(SInterpolationInfo *pInterpoInfo); + +int32_t taosNumOfRemainPoints(SInterpolationInfo *pInterpoInfo); + +/** + * + */ +int32_t taosDoInterpoResult(SInterpolationInfo *pInterpoInfo, int16_t interpoType, tFilePage **data, + int32_t numOfRawDataInRows, int32_t outputRows, int64_t nInterval, + int64_t *pPrimaryKeyArray, 
tColModel *pModel, char **srcData, int64_t *defaultVal, + int32_t *functionIDs, int32_t bufSize); + +int taosDoLinearInterpolation(int32_t type, SPoint *point1, SPoint *point2, SPoint *point); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TINTERPOLATION_H diff --git a/src/inc/tkey.h b/src/inc/tkey.h new file mode 100644 index 000000000000..e4917423860f --- /dev/null +++ b/src/inc/tkey.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TAOS_KEY_H_ +#define _TAOS_KEY_H_ + +#include +#include +#include "tlog.h" +#include "tmd5.h" +#include "tutil.h" + +unsigned char *base64_decode(const char *value, int inlen, int *outlen); +char *base64_encode(const unsigned char *value, int vlen); +char *taosDesEncode(int64_t key, char *src, int len); +char *taosDesDecode(int64_t key, char *src, int len); + +#endif diff --git a/src/inc/tlog.h b/src/inc/tlog.h new file mode 100644 index 000000000000..550f5fb41859 --- /dev/null +++ b/src/inc/tlog.h @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TLOG_H +#define TDENGINE_TLOG_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include "tglobalcfg.h" + +#define DEBUG_ERROR 1 +#define DEBUG_WARN 2 +#define DEBUG_TRACE 4 +#define DEBUG_DUMP 8 + +#define DEBUG_FILE 0x80 +#define DEBUG_SCREEN 0x40 + +extern int uDebugFlag; + +extern void (*taosLogFp)(int level, const char *const format, ...); + +extern void (*taosLogSqlFp)(char *sql); + +extern void (*taosLogAcctFp)(char *acctId, int64_t currentPointsPerSecond, int64_t maxPointsPerSecond, + int64_t totalTimeSeries, int64_t maxTimeSeries, int64_t totalStorage, int64_t maxStorage, + int64_t totalQueryTime, int64_t maxQueryTime, int64_t totalInbound, int64_t maxInbound, + int64_t totalOutbound, int64_t maxOutbound, int64_t totalDbs, int64_t maxDbs, + int64_t totalUsers, int64_t maxUsers, int64_t totalStreams, int64_t maxStreams, + int64_t totalConns, int64_t maxConns, int8_t accessState); + +int taosInitLog(char *logName, int numOfLogLines, int maxFiles); + +void taosCloseLogger(); + +void taosDumpData(unsigned char *msg, int len); + +int taosOpenLogFile(char *fn); + +void tprintf(const char *const flags, int dflag, const char *const format, ...); + +void taosPrintLongString(const char *const flags, int dflag, const char *const format, ...); + +int taosOpenLogFileWithMaxLines(char *fn, int maxLines, int maxFileNum); + +void taosCloseLog(); + +void taosResetLogFile(); + +#define taosLogError(...) \ + if (taosLogFp) { \ + (*taosLogFp)(2, __VA_ARGS__); \ + } +#define taosLogWarn(...) \ + if (taosLogFp) { \ + (*taosLogFp)(1, __VA_ARGS__); \ + } +#define taosLogPrint(...) \ + if (taosLogFp) { \ + (*taosLogFp)(0, __VA_ARGS__); \ + } + +// utility log function +#define pError(...) \ + if (uDebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR UTL ", 255, __VA_ARGS__); \ + } +#define pWarn(...) \ + if (uDebugFlag & DEBUG_WARN) { \ + tprintf("WARN UTL ", uDebugFlag, __VA_ARGS__); \ + } +#define pTrace(...) \ + if (uDebugFlag & DEBUG_TRACE) { \ + tprintf("UTL ", uDebugFlag, __VA_ARGS__); \ + } +#define pDump(x, y) \ + if (uDebugFlag & DEBUG_DUMP) { \ + taosDumpData(x, y); \ + } + +#define pPrint(...) \ + { tprintf("UTL ", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); } + +// client log function +extern int cdebugFlag; + +#define tscError(...) \ + if (cdebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR TSC ", cdebugFlag, __VA_ARGS__); \ + } +#define tscWarn(...) \ + if (cdebugFlag & DEBUG_WARN) { \ + tprintf("WARN TSC ", cdebugFlag, __VA_ARGS__); \ + } +#define tscTrace(...) \ + if (cdebugFlag & DEBUG_TRACE) { \ + tprintf("TSC ", cdebugFlag, __VA_ARGS__); \ + } +#define tscPrint(...) \ + { tprintf("TSC ", 255, __VA_ARGS__); } + +#define jniError(...) \ + if (jnidebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR JNI ", jnidebugFlag, __VA_ARGS__); \ + } +#define jniWarn(...) \ + if (jnidebugFlag & DEBUG_WARN) { \ + tprintf("WARN JNI ", jnidebugFlag, __VA_ARGS__); \ + } +#define jniTrace(...) \ + if (jnidebugFlag & DEBUG_TRACE) { \ + tprintf("JNI ", jnidebugFlag, __VA_ARGS__); \ + } +#define jniPrint(...) \ + { tprintf("JNI ", 255, __VA_ARGS__); } + +// rpc log function +extern int taosDebugFlag; +#define tError(...) \ + if (taosDebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR RPC ", taosDebugFlag, __VA_ARGS__); \ + } +#define tWarn(...) \ + if (taosDebugFlag & DEBUG_WARN) { \ + tprintf("WARN RPC ", taosDebugFlag, __VA_ARGS__); \ + } +#define tTrace(...) \ + if (taosDebugFlag & DEBUG_TRACE) { \ + tprintf("RPC ", taosDebugFlag, __VA_ARGS__); \ + } +#define tPrint(...) 
\ + { tprintf("RPC ", 255, __VA_ARGS__); } +#define tDump(x, y) \ + if (taosDebugFlag & DEBUG_DUMP) { \ + taosDumpData((unsigned char *)x, y); \ + } + +// dnode log function +#define dError(...) \ + if (ddebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR DND ", 255, __VA_ARGS__); \ + } +#define dWarn(...) \ + if (ddebugFlag & DEBUG_WARN) { \ + tprintf("WARN DND ", ddebugFlag, __VA_ARGS__); \ + } +#define dTrace(...) \ + if (ddebugFlag & DEBUG_TRACE) { \ + tprintf("DND ", ddebugFlag, __VA_ARGS__); \ + } +#define dPrint(...) \ + { tprintf("DND ", 255, __VA_ARGS__); } + +#define dLError(...) taosLogError(__VA_ARGS__) dError(__VA_ARGS__) +#define dLWarn(...) taosLogWarn(__VA_ARGS__) dWarn(__VA_ARGS__) +#define dLPrint(...) taosLogPrint(__VA_ARGS__) dPrint(__VA_ARGS__) + +#define qTrace(...) \ + if (qdebugFlag & DEBUG_TRACE) { \ + tprintf("DND QRY ", qdebugFlag, __VA_ARGS__); \ + } + +// mnode log function +#define mError(...) \ + if (mdebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR MND ", 255, __VA_ARGS__); \ + } +#define mWarn(...) \ + if (mdebugFlag & DEBUG_WARN) { \ + tprintf("WARN MND ", mdebugFlag, __VA_ARGS__); \ + } +#define mTrace(...) \ + if (mdebugFlag & DEBUG_TRACE) { \ + tprintf("MND ", mdebugFlag, __VA_ARGS__); \ + } +#define mPrint(...) \ + { tprintf("MND ", 255, __VA_ARGS__); } + +#define mLError(...) taosLogError(__VA_ARGS__) mError(__VA_ARGS__) +#define mLWarn(...) taosLogWarn(__VA_ARGS__) mWarn(__VA_ARGS__) +#define mLPrint(...) taosLogPrint(__VA_ARGS__) mPrint(__VA_ARGS__) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/tlosertree.h b/src/inc/tlosertree.h new file mode 100644 index 000000000000..fb64fd2ee435 --- /dev/null +++ b/src/inc/tlosertree.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TLOSERTREE_H +#define TDENGINE_TLOSERTREE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +typedef int (*__merge_compare_fn_t)(const void *, const void *, void *param); + +typedef struct SLoserTreeNode { + int32_t index; + void * pData; +} SLoserTreeNode; + +typedef struct SLoserTreeInfo { + int32_t numOfEntries; + int32_t totalEntries; + __merge_compare_fn_t comparaFn; + void * param; + + SLoserTreeNode *pNode; +} SLoserTreeInfo; + +uint8_t tLoserTreeCreate(SLoserTreeInfo **pTree, int32_t numOfEntries, void *param, __merge_compare_fn_t compareFn); + +void tLoserTreeInit(SLoserTreeInfo *pTree); + +void tLoserTreeAdjust(SLoserTreeInfo *pTree, int32_t idx); + +void tLoserTreeRebuild(SLoserTreeInfo *pTree); + +void tLoserTreeDisplay(SLoserTreeInfo *pTree); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TLOSERTREE_H diff --git a/src/inc/tmd5.h b/src/inc/tmd5.h new file mode 100644 index 000000000000..d7fd038f37c9 --- /dev/null +++ b/src/inc/tmd5.h @@ -0,0 +1,41 @@ +/* + *********************************************************************** + ** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. 
** + ** ** + ** License to copy and use this software is granted provided that ** + ** it is identified as the "RSA Data Security, Inc. MD5 Message- ** + ** Digest Algorithm" in all material mentioning or referencing this ** + ** software or this function. ** + ** ** + ** License is also granted to make and use derivative works ** + ** provided that such works are identified as "derived from the RSA ** + ** Data Security, Inc. MD5 Message-Digest Algorithm" in all ** + ** material mentioning or referencing the derived work. ** + ** ** + ** RSA Data Security, Inc. makes no representations concerning ** + ** either the merchantability of this software or the suitability ** + ** of this software for any particular purpose. It is provided "as ** + ** is" without express or implied warranty of any kind. ** + ** ** + ** These notices must be retained in any copies of any part of this ** + ** documentation and/or software. ** + *********************************************************************** + */ + +#ifndef _taos_md5_header_ +#define _taos_md5_header_ + +#include + +typedef struct { + uint32_t i[2]; /* number of _bits_ handled mod 2^64 */ + uint32_t buf[4]; /* scratch buffer */ + uint8_t in[64]; /* input buffer */ + uint8_t digest[16]; /* actual digest after MD5Final call */ +} MD5_CTX; + +void MD5Init(MD5_CTX *mdContext); +void MD5Update(MD5_CTX *mdContext, uint8_t *inBuf, unsigned int inLen); +void MD5Final(MD5_CTX *mdContext); + +#endif diff --git a/src/inc/tmempool.h b/src/inc/tmempool.h new file mode 100644 index 000000000000..f2c6a0ef006c --- /dev/null +++ b/src/inc/tmempool.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#ifndef TDENGINE_TMEMPOOL_H +#define TDENGINE_TMEMPOOL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define mpool_h void * + +mpool_h taosMemPoolInit(int maxNum, int blockSize); + +char *taosMemPoolMalloc(mpool_h handle); + +void taosMemPoolFree(mpool_h handle, char *p); + +void taosMemPoolCleanUp(mpool_h handle); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/tmodule.h b/src/inc/tmodule.h new file mode 100644 index 000000000000..943ce7102990 --- /dev/null +++ b/src/inc/tmodule.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TMODULE_H +#define TDENGINE_TMODULE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +typedef struct _msg_header { + int mid; /* message ID */ + int cid; /* call ID */ + int tid; /* transaction ID */ + // int len; /* length of msg */ + char *msg; /* content holder */ +} msg_header_t, msg_t; + +typedef struct { + char * name; /* module name */ + pthread_t thread; /* thread ID */ + sem_t emptySem; + sem_t fullSem; + int fullSlot; + int emptySlot; + int debugFlag; + int queueSize; + int msgSize; + pthread_mutex_t queueMutex; + pthread_mutex_t stmMutex; + msg_t * queue; + + int (*processMsg)(msg_t *); + + int (*init)(); + + void (*cleanUp)(); +} module_t; + +typedef struct { + short len; + unsigned char data[0]; +} sim_data_t; + +extern int maxCid; +extern module_t moduleObj[]; +extern char * msgName[]; + +extern int taosSendMsgToModule(module_t *mod_p, int cid, int mid, int tid, char *msg); + +extern char *taosDisplayModuleStatus(int moduleNum); + +extern int taosInitModule(module_t *); + +extern void taosCleanUpModule(module_t *); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/trpc.h b/src/inc/trpc.h new file mode 100644 index 000000000000..18be516ed59f --- /dev/null +++ b/src/inc/trpc.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#ifndef TDENGINE_TRPC_H +#define TDENGINE_TRPC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "taosmsg.h" +#include "tsched.h" + +#define TAOS_CONN_UDPS 0 +#define TAOS_CONN_UDPC 1 +#define TAOS_CONN_UDP 1 +#define TAOS_CONN_TCPS 2 +#define TAOS_CONN_TCPC 3 +#define TAOS_CONN_HTTPS 4 +#define TAOS_CONN_HTTPC 5 + +#define TAOS_ID_ASSIGNED 0 +#define TAOS_ID_FREE 1 +#define TAOS_ID_REALLOCATE 2 + +#define taosSendMsgToPeer(x, y, z) taosSendMsgToPeerH(x, y, z, NULL) +#define taosBuildReqMsg(x, y) taosBuildReqMsgWithSize(x, y, 512) +#define taosBuildRspMsg(x, y) taosBuildRspMsgWithSize(x, y, 512) + +typedef struct { + char *localIp; // local IP used + short localPort; // local port + char *label; // for debug purpose + int numOfThreads; // number of threads to handle connections + void *(*fp)(char *, void *, void *); // function to process the incoming msg + void *qhandle; // queue handle + int bits; // number of bits for sessionId + int numOfChanns; // number of channels + int sessionsPerChann; // number of sessions per channel + int idMgmt; // TAOS_ID_ASSIGNED, TAOS_ID_FREE + int connType; // TAOS_CONN_UDP, TAOS_CONN_TCPC, TAOS_CONN_TCPS + int idleTime; // milliseconds, 0 means idle timer is disabled + int noFree; // not free buffer + void (*efp)(int cid); // call back function to process not activated chann + int (*afp)(char *meterId, char *spi, char *encrypt, uint8_t *secret, + uint8_t *ckey); // call back to retrieve auth info +} SRpcInit; + +typedef struct { + int cid; // channel ID + int sid; // session ID + char * meterId; // meter ID + uint32_t peerId; // peer link ID + void * shandle; // pointer returned by taosOpenRpc + void * ahandle; // handle provided by app + char * peerIp; // peer IP string + short peerPort; // peer port + char spi; // security parameter index + char encrypt; // encrypt algorithm + char * secret; // key for authentication + char * ckey; // ciphering key +} SRpcConnInit; + +extern int taosDebugFlag; +extern int tsRpcHeadSize; + +void *taosOpenRpc(SRpcInit *pRpc); + +void taosCloseRpc(void *); + +int taosOpenRpcChann(void *handle, int cid, int sessions); + +void taosCloseRpcChann(void *handle, int cid); + +void *taosOpenRpcConn(SRpcConnInit *pInit, uint8_t *code); + +void taosCloseRpcConn(void *thandle); + +void taosStopRpcConn(void *thandle); + +int taosSendMsgToPeerH(void *thandle, char *pCont, int contLen, void *ahandle); + +char *taosBuildReqHeader(void *param, char type, char *msg); + +char *taosBuildReqMsgWithSize(void *, char type, int size); + +char *taosBuildRspMsgWithSize(void *, char type, int size); + +int taosSendSimpleRsp(void *thandle, char rsptype, char code); + +int taosSetSecurityInfo(int cid, int sid, char *id, int spi, int encrypt, char *secret, char *ckey); + +void taosGetRpcConnInfo(void *thandle, uint32_t *peerId, uint32_t *peerIp, short *peerPort, int *cid, int *sid); + +int taosGetOutType(void *thandle); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TRPC_H diff --git a/src/inc/tsched.h b/src/inc/tsched.h new file mode 100644 index 000000000000..c430fb3caf2c --- /dev/null +++ b/src/inc/tsched.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSCHED_H +#define TDENGINE_TSCHED_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct _sched_msg { + void (*fp)(struct _sched_msg *); + + void (*tfp)(void *, void *); + + char *msg; + void *ahandle; + void *thandle; +} SSchedMsg; + +void *taosInitScheduler(int queueSize, int numOfThreads, char *label); + +int taosScheduleTask(void *qhandle, SSchedMsg *pMsg); + +void taosCleanUpScheduler(void *param); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCHED_H diff --git a/src/inc/tschemautil.h b/src/inc/tschemautil.h new file mode 100644 index 000000000000..96a3cb804780 --- /dev/null +++ b/src/inc/tschemautil.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSCHEMAUTIL_H +#define TDENGINE_TSCHEMAUTIL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "taosmsg.h" + +#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS) + +struct SSchema; + +/** + * check if the schema is valid or not, including following aspects: + * 1. number of columns + * 2. column types + * 3. column length + * 4. column names + * 5. total length + * + * @param pSchema + * @param numOfCols + * @return + */ +bool isValidSchema(struct SSchema *pSchema, int32_t numOfCols); + +struct SSchema *tsGetSchema(SMeterMeta *pMeta); + +struct SSchema *tsGetTagSchema(SMeterMeta *pMeta); + +struct SSchema *tsGetSchemaColIdx(SMeterMeta *pMeta, int32_t startCol); + +char *tsGetTagsValue(SMeterMeta *pMeta); + +bool tsMeterMetaIdentical(SMeterMeta *p1, SMeterMeta *p2); + +void extractMeterName(char *meterId, char *name); + +void extractDBName(char *meterId, char *name); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCHEMAUTIL_H diff --git a/src/inc/tscompression.h b/src/inc/tscompression.h new file mode 100644 index 000000000000..71c25eed3f6c --- /dev/null +++ b/src/inc/tscompression.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TSCOMPRESSION_H +#define TDENGINE_TSCOMPRESSION_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tsdb.h" + +#define BITS_PER_BYTE 8 +// Masks +#define INT64MASK(_x) ((1ul << _x) - 1) +#define INT32MASK(_x) (((uint32_t)1 << _x) - 1) +#define INT8MASK(_x) (((uint8_t)1 << _x) - 1) +// Compression algorithm +#define NO_COMPRESSION 0 +#define ONE_STAGE_COMP 1 +#define TWO_STAGE_COMP 2 + +int tsCompressTinyint(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorithm, + char* const buffer, int bufferSize); +int tsCompressSmallint(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorith, + char* const buffer, int bufferSize); +int tsCompressInt(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorith, + char* const buffer, int bufferSize); +int tsCompressBigint(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorith, + char* const buffer, int bufferSize); +int tsCompressBool(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorithm, + char* const buffer, int bufferSize); +int tsCompressString(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorith, + char* const buffer, int bufferSize); +int tsCompressFloat(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorith, + char* const buffer, int bufferSize); +int tsCompressDouble(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorith, + char* const buffer, int bufferSize); +int tsCompressTimestamp(const char* const input, int inputSize, const int nelements, char* const output, int outputSize, char algorith, + char* const buffer, int bufferSize); + +int tsDecompressTinyint(const char* const input, int compressedSize, const int nelements, char* const output, + int outputSize, char algorithm, char* const buffer, int bufferSize); +int tsDecompressSmallint(const char* const input, int compressedSize, const int nelements, char* const output, + int outputSize, char algorithm, char* const buffer, int bufferSize); +int tsDecompressInt(const char* const input, int compressedSize, const int nelements, char* const output, int outputSize, + char algorithm, char* const buffer, int bufferSize); +int tsDecompressBigint(const char* const input, int compressedSize, const int nelements, char* const output, + int outputSize, char algorithm, char* const buffer, int bufferSize); +int tsDecompressBool(const char* const input, int compressedSize, const int nelements, char* const output, + int outputSize, char algorithm, char* const buffer, int bufferSize); +int tsDecompressString(const char* const input, int compressedSize, const int nelements, char* const output, + int outputSize, char algorithm, char* const buffer, int bufferSize); +int tsDecompressFloat(const char* const input, int compressedSize, const int nelements, char* const output, + int outputSize, char algorithm, char* const buffer, int bufferSize); +int tsDecompressDouble(const char* const input, int compressedSize, const int nelements, char* const output, + int outputSize, char algorith, char* const buffer, int bufferSize); +int tsDecompressTimestamp(const char* const input, int compressedSize, const int nelements, char* const output, + int 
outputSize, char algorithm, char* const buffer, int bufferSize); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSCOMPRESSION_H \ No newline at end of file diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h new file mode 100644 index 000000000000..7f6d7d306038 --- /dev/null +++ b/src/inc/tsdb.h @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _tsdb_global_header_ +#define _tsdb_global_header_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "tglobalcfg.h" + +#define TSDB__packed + +#ifdef TSKEY32 +#define TSKEY int32_t; +#else +#define TSKEY int64_t +#endif + +#define TSDB_TRUE 1 +#define TSDB_FALSE 0 +#define TSDB_OK 0 +#define TSDB_ERR -1 + +#define TS_PATH_DELIMITER "." + +#define TSDB_TIME_PRECISION_MILLI 0 +#define TSDB_TIME_PRECISION_MICRO 1 + +#define TSDB_TIME_PRECISION_MILLI_STR "ms" +#define TSDB_TIME_PRECISION_MICRO_STR "us" + +enum _status { + TSDB_STATUS_OFFLINE, + TSDB_STATUS_CREATING, + TSDB_STATUS_UNSYNCED, + TSDB_STATUS_SLAVE, + TSDB_STATUS_MASTER, + TSDB_STATUS_READY, +}; + +enum _syncstatus { + STDB_SSTATUS_INIT, + TSDB_SSTATUS_SYNCING, + TSDB_SSTATUS_SYNC_CACHE, + TSDB_SSTATUS_SYNC_FILE, +}; + +#define TSDB_DATA_TYPE_BOOL 1 // 1 bytes +#define TSDB_DATA_TYPE_TINYINT 2 // 1 byte +#define TSDB_DATA_TYPE_SMALLINT 3 // 2 bytes +#define TSDB_DATA_TYPE_INT 4 // 4 bytes +#define TSDB_DATA_TYPE_BIGINT 5 // 8 bytes +#define TSDB_DATA_TYPE_FLOAT 6 // 4 bytes +#define TSDB_DATA_TYPE_DOUBLE 7 // 8 bytes +#define TSDB_DATA_TYPE_BINARY 8 // string +#define TSDB_DATA_TYPE_TIMESTAMP 9 // 8 bytes +#define TSDB_DATA_TYPE_NCHAR 10 // wide string + +#define TSDB_KEYSIZE sizeof(TSKEY) +#define TSDB_NCHAR_SIZE sizeof(wchar_t) + +#define TSDB_RELATION_LESS 1 +#define TSDB_RELATION_LARGE 2 +#define TSDB_RELATION_EQUAL 3 +#define TSDB_RELATION_LESS_EQUAL 4 +#define TSDB_RELATION_LARGE_EQUAL 5 +#define TSDB_RELATION_NOT_EQUAL 6 +#define TSDB_RELATION_LIKE 7 + +#define TSDB_RELATION_AND 8 +#define TSDB_RELATION_OR 9 +#define TSDB_RELATION_NOT 10 + +#define TSDB_BINARY_OP_ADD 11 +#define TSDB_BINARY_OP_SUBTRACT 12 +#define TSDB_BINARY_OP_MULTIPLY 13 +#define TSDB_BINARY_OP_DIVIDE 14 +#define TSDB_BINARY_OP_REMAINDER 15 + +#define TSDB_USERID_LEN 9 +#define TS_PATH_DELIMITER_LEN 1 + +#define TSDB_METER_ID_LEN_MARGIN 10 +#define TSDB_METER_ID_LEN \ + (TSDB_DB_NAME_LEN + TSDB_METER_NAME_LEN + 2 * TS_PATH_DELIMITER_LEN + TSDB_USERID_LEN + \ + TSDB_METER_ID_LEN_MARGIN) // TSDB_DB_NAME_LEN+TSDB_METER_NAME_LEN+2*strlen(TS_PATH_DELIMITER)+strlen(USERID) +#define TSDB_UNI_LEN 24 +#define TSDB_USER_LEN TSDB_UNI_LEN +#define TSDB_ACCT_LEN TSDB_UNI_LEN +#define TSDB_PASSWORD_LEN TSDB_UNI_LEN + +#define TSDB_MAX_COLUMNS 256 +#define TSDB_MIN_COLUMNS 2 // PRIMARY COLUMN(timestamp) + other columns + +#define TSDB_METER_NAME_LEN 64 +#define TSDB_DB_NAME_LEN 32 +#define TSDB_COL_NAME_LEN 64 +#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 16 +#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE + +#define TSDB_MAX_BYTES_PER_ROW 
TSDB_MAX_COLUMNS * 16 +#define TSDB_MAX_TAGS_LEN 512 +#define TSDB_MAX_TAGS 6 + +#define TSDB_AUTH_LEN 16 +#define TSDB_KEY_LEN 16 +#define TSDB_VERSION_LEN 12 +#define TSDB_STREET_LEN 64 +#define TSDB_CITY_LEN 20 +#define TSDB_STATE_LEN 20 +#define TSDB_COUNTRY_LEN 20 +#define TSDB_VNODES_SUPPORT 6 +#define TSDB_MGMT_SUPPORT 4 +#define TSDB_LOCALE_LEN 64 +#define TSDB_TIMEZONE_LEN 64 + +#define TSDB_IPv4ADDR_LEN 16 +#define TSDB_FILENAME_LEN 128 +#define TSDB_METER_VNODE_BITS 20 +#define TSDB_METER_SID_MASK 0xFFFFF +#define TSDB_SHELL_VNODE_BITS 24 +#define TSDB_SHELL_SID_MASK 0xFF +#define TSDB_HTTP_TOKEN_LEN 20 +#define TSDB_SHOW_SQL_LEN 32 + +#define TSDB_METER_STATE_OFFLINE 0 +#define TSDB_METER_STATE_ONLLINE 1 + +#define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE + +#define TSDB_PAYLOAD_SIZE (TSDB_DEFAULT_PKT_SIZE - 100) +#define TSDB_DEFAULT_PAYLOAD_SIZE 1024 // default payload size +#define TSDB_EXTRA_PAYLOAD_SIZE 128 // extra bytes for auth +#define TSDB_SQLCMD_SIZE 1024 +#define TSDB_MAX_VNODES 256 +#define TSDB_MIN_VNODES 50 +#define TSDB_INVALID_VNODE_NUM 0 + +#define TSDB_DNODE_ROLE_ANY 0 +#define TSDB_DNODE_ROLE_MGMT 1 +#define TSDB_DNODE_ROLE_VNODE 2 + +#define TSDB_MAX_MPEERS 5 +#define TSDB_MAX_MGMT_IPS (TSDB_MAX_MPEERS + 1) + +#define TSDB_REPLICA_MAX_NUM 3 +#define TSDB_REPLICA_MIN_NUM 1 + +// default value == 10 +#define TSDB_FILE_MIN_PARTITION_RANGE 1 // minimum partition range of vnode file in days +#define TSDB_FILE_MAX_PARTITION_RANGE 3650 // max partition range of vnode file in days + +#define TSDB_DATA_MIN_RESERVE_DAY 1 // data in db to be reserved. +#define TSDB_DATA_DEFAULT_RESERVE_DAY 3650 // ten years + +#define TSDB_MIN_COMPRESSION_LEVEL 0 +#define TSDB_MAX_COMPRESSION_LEVEL 2 + +#define TSDB_MIN_CACHE_BLOCKS_PER_METER 32 +#define TSDB_MAX_CACHE_BLOCKS_PER_METER 40960 + +#define TSDB_MIN_COMMIT_TIME_INTERVAL 30 +#define TSDB_MAX_COMMIT_TIME_INTERVAL 40960 + +#define TSDB_MIN_ROWS_IN_FILEBLOCK 200 +#define TSDB_MAX_ROWS_IN_FILEBLOCK 500000 + +#define TSDB_MIN_CACHE_BLOCK_SIZE 100 +#define TSDB_MAX_CACHE_BLOCK_SIZE 104857600 + +#define TSDB_MIN_CACHE_BLOCKS 100 +#define TSDB_MAX_CACHE_BLOCKS 409600 + +#define TSDB_MAX_AVG_BLOCKS 2048 + +#define TSDB_MIN_TABLES_PER_VNODE 1 +#define TSDB_MAX_TABLES_PER_VNODE 220000 + +#define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW - TSDB_KEYSIZE) +#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_BYTES_PER_ROW - TSDB_KEYSIZE) +#define PRIMARYKEY_TIMESTAMP_COL_INDEX 0 + +#define TSDB_DATA_BOOL_NULL 0x02 +#define TSDB_DATA_TINYINT_NULL 0x80 +#define TSDB_DATA_SMALLINT_NULL 0x8000 +#define TSDB_DATA_INT_NULL 0x80000000 +#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L + +#define TSDB_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN +#define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000L // an NAN +#define TSDB_DATA_NCHAR_NULL 0xFFFFFFFF +#define TSDB_DATA_BINARY_NULL 0xFF + +#define TSDB_DATA_NULL_STR "NULL" +#define TSDB_DATA_NULL_STR_L "null" + +#define TSDB_MAX_RPC_THREADS 5 + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/tskiplist.h b/src/inc/tskiplist.h new file mode 100644 index 000000000000..b518b1d6713e --- /dev/null +++ b/src/inc/tskiplist.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TBASE_TSKIPLIST_H +#define TBASE_TSKIPLIST_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_SKIP_LIST_LEVEL 20 + +#include +#include +#include +#include + +#include "ttypes.h" + +/* + * generate random data with uniform&skewed distribution, extracted from levelDB + */ +typedef struct SRandom { + uint32_t s; + + uint32_t (*rand)(struct SRandom *, int32_t n); +} SRandom; + +/* + * key of each node + * todo move to as the global structure in all search codes... + */ + +const static size_t SKIP_LIST_STR_KEY_LENGTH_THRESHOLD = 15; +typedef tVariant tSkipListKey; + +typedef enum tSkipListPointQueryType { + INCLUDE_POINT_QUERY, + EXCLUDE_POINT_QUERY, +} tSkipListPointQueryType; + +typedef struct tSkipListNode { + uint16_t nLevel; + char * pData; + tSkipListKey key; + + struct tSkipListNode **pForward; + struct tSkipListNode **pBackward; +} tSkipListNode; + +/* + * @version 0.2 + * @date 2017/11/12 + * @author liaohj + * the simple version of SkipList. + * for multi-thread safe purpose, we employ pthread_rwlock_t to guarantee to + * generate + * deterministic result. Later, we will remove the lock in SkipList to further + * enhance the performance. In this case, one should use the concurrent skip + * list (by + * using michael-scott algorithm) instead of this simple version in a + * multi-thread + * environment, to achieve higher performance of read/write operations. + * + * Note: Duplicated primary key situation. + * In case of duplicated primary key, two ways can be employed to handle this + * situation: + * 1. add as normal insertion with out special process. + * 2. add an overflow pointer at each list node, all nodes with the same key + * will be added + * in the overflow pointer. In this case, the total steps of each search will + * be reduced significantly. + * Currently, we implement the skip list in a line with the first means, + * maybe refactor it soon. + * Memory consumption: the memory alignment causes many memory wasted. So, + * employ a memory + * pool will significantly reduce the total memory consumption, as well as the + * calloc/malloc + * operation costs. + * + * 3. use the iterator pattern to refactor all routines to make it more clean + */ + +// state struct, record following information: +// number of links in each level. 
+// avg search steps, for latest 1000 queries +// avg search rsp time, for latest 1000 queries +// total memory size +typedef struct tSkipListState { + // in bytes, sizeof(tSkipList)+sizeof(tSkipListNode)*tSkipList->nSize + uint64_t nTotalMemSize; + uint64_t nLevelNodeCnt[MAX_SKIP_LIST_LEVEL]; + + uint64_t queryCount; // total query count + + /* + * only record latest 1000 queries + * when the value==1000, = 0, + * nTotalStepsForQueries = 0, + * nTotalElapsedTimeForQueries = 0 + */ + uint64_t nRecQueries; + uint16_t nTotalStepsForQueries; + uint64_t nTotalElapsedTimeForQueries; + + uint16_t nInsertObjs; + uint16_t nTotalStepsForInsert; + uint64_t nTotalElapsedTimeForInsert; +} tSkipListState; + +typedef struct tSkipList { + tSkipListNode pHead; + uint64_t nSize; + + uint16_t nMaxLevel; + uint16_t nLevel; + + uint16_t keyType; + uint16_t nMaxKeyLen; + + __compar_fn_t comparator; + pthread_rwlock_t lock; // will be removed soon + + // random generator + SRandom r; + + // skiplist state + tSkipListState state; +} tSkipList; + +/* + * query condition structure to denote the range query + * //todo merge the point query cond with range query condition + */ +typedef struct tSKipListQueryCond { + // when the upper bounding == lower bounding, it is a point query + tSkipListKey lowerBnd; + tSkipListKey upperBnd; + + int32_t lowerBndRelOptr; // relation operator to denote if lower bound is + // included or not + int32_t upperBndRelOptr; +} tSKipListQueryCond; + +int32_t tSkipListCreate(tSkipList **pSkipList, int16_t nMaxLevel, int16_t keyType, int16_t nMaxKeyLen, + int32_t (*funcp)()); + +void tSkipListDestroy(tSkipList **pSkipList); + +// create skip list key +tSkipListKey tSkipListCreateKey(int32_t type, char *val, size_t keyLength); + +// destroy skip list key +void tSkipListDestroyKey(tSkipListKey *pKey); + +// put data into skiplist +tSkipListNode *tSkipListPut(tSkipList *pSkipList, void *pData, tSkipListKey *pKey, int32_t insertIdenticalKey); + +/* + * get only *one* node of which key is equalled to pKey, even there are more + * than + * one nodes are of the same key + */ +tSkipListNode *tSkipListGetOne(tSkipList *pSkipList, tSkipListKey *pKey); + +/* + * get all data with the same keys + */ +int32_t tSkipListGets(tSkipList *pSkipList, tSkipListKey *pKey, tSkipListNode ***pRes); + +int32_t tSkipListIterateList(tSkipList *pSkipList, tSkipListNode ***pRes, bool (*fp)(tSkipListNode *, void *), + void *param); + +/* + * remove only one node of the pKey value. 
+ * If more than one node has the same value, any one will be removed + * + * @Return + * true: one node has been removed + * false: no node has been removed + */ +bool tSkipListRemove(tSkipList *pSkipList, tSkipListKey *pKey); + +/* + * remove the specified node in parameters + */ +void tSkipListRemoveNode(tSkipList *pSkipList, tSkipListNode *pNode); + +int32_t tSkipListDefaultCompare(tSkipList *pSkipList, tSkipListKey *a, tSkipListKey *b); + +// for debug purpose only +void tSkipListPrint(tSkipList *pSkipList, int16_t nlevel); + +/* + * range query & single point query function + */ +int32_t tSkipListQuery(tSkipList *pSkipList, tSKipListQueryCond *pQueryCond, tSkipListNode ***pResult); + +/* + * include/exclude point query + */ +int32_t tSkipListPointQuery(tSkipList *pSkipList, tSkipListKey *pKey, int32_t numOfKey, tSkipListPointQueryType type, + tSkipListNode ***pResult); + +void removeNodeEachLevel(tSkipList *pSkipList, int32_t nLevel); + +// todo move to utility +void tInitMatrix(double *x, double *y, int32_t length, double p[2][3]); + +int32_t tCompute(double p[2][3]); + +#ifdef __cplusplus +} +#endif + +#endif // TBASE_TSKIPLIST_H diff --git a/src/inc/tsocket.h b/src/inc/tsocket.h new file mode 100644 index 000000000000..0a02fcf551d2 --- /dev/null +++ b/src/inc/tsocket.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSOCKET_H +#define TDENGINE_TSOCKET_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +int taosNonblockwrite(int fd, char *ptr, int nbytes); + +int taosReadn(int sock, char *buffer, int len); + +int taosWriteMsg(int fd, void *ptr, int nbytes); + +int taosReadMsg(int fd, void *ptr, int nbytes); + +int taosOpenUdpSocket(char *ip, short port); + +int taosOpenTcpClientSocket(char *ip, short port, char *localIp); + +int taosOpenTcpServerSocket(char *ip, short port); + +int taosKeepTcpAlive(int sockFd); + +void taosCloseTcpSocket(int sockFd); + +int taosOpenUDServerSocket(char *ip, short port); + +int taosOpenUDClientSocket(char *ip, short port); + +int taosOpenRawSocket(char *ip); + +int taosCopyFds(int sfd, int dfd, int64_t len); + +int taosGetPublicIp(char *const ip); + +int taosGetPrivateIp(char *const ip); + +void tinet_ntoa(char *ipstr, unsigned int ip); + +int taosSetNonblocking(int sock, int on); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSOCKET_H diff --git a/src/inc/tsql.h b/src/inc/tsql.h new file mode 100644 index 000000000000..ec8454bd30a1 --- /dev/null +++ b/src/inc/tsql.h @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSQL_H +#define TDENGINE_TSQL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "taos.h" +#include "tsqldef.h" +#include "ttypes.h" + +#define TK_SPACE 200 +#define TK_COMMENT 201 +#define TK_ILLEGAL 202 +#define TK_HEX 203 +#define TK_OCT 204 + +#define TSQL_SO_ASC 1 +#define TSQL_SO_DESC 0 + +#define MAX_TOKEN_LEN 30 + +#define TSQL_TBNAME "TBNAME" +#define TSQL_TBNAME_L "tbname" + +#define TSQL_STABLE_QTYPE_COND 1 +#define TSQL_STABLE_QTYPE_SET 2 + +// token type +enum { + TSQL_NODE_TYPE_EXPR = 0x1, + TSQL_NODE_TYPE_ID = 0x2, + TSQL_NODE_TYPE_VALUE = 0x4, +}; + +extern char tTokenTypeSwitcher[12]; + +#define toTSDBType(x) \ + do { \ + if ((x) >= tListLen(tTokenTypeSwitcher)) { \ + (x) = TSDB_DATA_TYPE_BINARY; \ + } else { \ + (x) = tTokenTypeSwitcher[(x)]; \ + } \ + } while (0) + +typedef struct SLimitVal { + int64_t limit; + int64_t offset; +} SLimitVal; + +typedef struct SOrderVal { + int32_t order; + int32_t orderColId; +} SOrderVal; + +typedef struct tVariantListItem { + tVariant pVar; + uint8_t sortOrder; +} tVariantListItem; + +typedef struct tVariantList { + int32_t nExpr; /* Number of expressions on the list */ + int32_t nAlloc; /* Number of entries allocated below */ + tVariantListItem *a; /* One entry for each expression */ +} tVariantList; + +typedef struct tFieldList { + int32_t nField; + int32_t nAlloc; + TAOS_FIELD *p; +} tFieldList; + +// sql operation type +enum TSQL_TYPE { + TSQL_CREATE_NORMAL_METER = 0x01, + TSQL_CREATE_NORMAL_METRIC = 0x02, + TSQL_CREATE_METER_FROM_METRIC = 0x04, + TSQL_CREATE_STREAM = 0x08, + TSQL_QUERY_METER = 0x10, + TSQL_INSERT = 0x20, + + DROP_DNODE = 0x40, + DROP_DATABASE = 0x41, + DROP_TABLE = 0x42, + DROP_USER = 0x43, + DROP_ACCOUNT = 0x44, + + USE_DATABASE = 0x50, + + // show operation + SHOW_DATABASES = 0x60, + SHOW_TABLES = 0x61, + SHOW_STABLES = 0x62, + SHOW_MNODES = 0x63, + SHOW_DNODES = 0x64, + SHOW_ACCOUNTS = 0x65, + SHOW_USERS = 0x66, + SHOW_VGROUPS = 0x67, + SHOW_QUERIES = 0x68, + SHOW_STREAMS = 0x69, + SHOW_CONFIGS = 0x6a, + SHOW_SCORES = 0x6b, + SHOW_MODULES = 0x6c, + SHOW_CONNECTIONS = 0x6d, + SHOW_GRANTS = 0x6e, + + // create dnode + CREATE_DNODE = 0x80, + CREATE_DATABASE = 0x81, + CREATE_USER = 0x82, + CREATE_ACCOUNT = 0x83, + + DESCRIBE_TABLE = 0x90, + + ALTER_USER_PASSWD = 0xA0, + ALTER_USER_PRIVILEGES = 0xA1, + ALTER_DNODE = 0xA2, + ALTER_LOCAL = 0xA3, + ALTER_DATABASE = 0xA4, + ALTER_ACCT = 0xA5, + + // reset operation + RESET_QUERY_CACHE = 0xB0, + + // alter tags + ALTER_TABLE_TAGS_ADD = 0xC0, + ALTER_TABLE_TAGS_DROP = 0xC1, + ALTER_TABLE_TAGS_CHG = 0xC2, + ALTER_TABLE_TAGS_SET = 0xC4, + + // alter table column + ALTER_TABLE_ADD_COLUMN = 0xD0, + ALTER_TABLE_DROP_COLUMN = 0xD1, + + KILL_QUERY = 0xD2, + KILL_STREAM = 0xD3, + KILL_CONNECTION = 0xD4, +}; + +typedef struct SQuerySQL { + struct tSQLExprList *pSelection; // select clause + struct SSQLToken from; // from clause + struct tSQLExpr * pWhere; // where clause [optional] + tVariantList * pGroupby; // groupby clause, only for tags[optional] + tVariantList * pSortOrder; // orderby [optional] + SSQLToken interval; // interval [optional] + SSQLToken sliding; // sliding window [optional] + SLimitVal limit; // limit offset 
[optional] + SLimitVal glimit; // group limit offset [optional] + tVariantList * fillType; // fill type[optional] + SSQLToken selectToken; // sql string +} SQuerySQL; + +typedef struct SCreateTableSQL { + struct SSQLToken name; // meter name, create table [meterName] xxx + bool existCheck; + + struct { + tFieldList *pTagColumns; // for normal table, pTagColumns = NULL; + tFieldList *pColumns; + } colInfo; + + struct { + SSQLToken metricName; // metric name, for using clause + tVariantList *pTagVals; // create by using metric, tag value + } usingInfo; + + SQuerySQL *pSelect; + +} SCreateTableSQL; + +typedef struct SAlterTableSQL { + SSQLToken name; + tFieldList * pAddColumns; + SSQLToken dropTagToken; + tVariantList *varList; // set t=val or: change src dst +} SAlterTableSQL; + +typedef struct SInsertSQL { + SSQLToken name; + struct tSQLExprListList *pValue; +} SInsertSQL; + +typedef struct SCreateDBSQL { + SSQLToken dbname; + int32_t nReplica; + int32_t nDays; + + tVariantList *keep; + int32_t nRowsInFileBlock; + int32_t nCacheBlockSize; + float nCacheNumOfBlocks; + int32_t numOfBlocksPerTable; + + int32_t nTablesPerVnode; + int64_t commitTime; + int32_t commitLog; + int32_t compressionLevel; + SSQLToken precision; // time precision[ms by default/us] +} SCreateDBSQL; + +typedef struct SCreateAcctSQL { + int32_t users; + int32_t dbs; + int32_t tseries; + int32_t streams; + int32_t pps; + int64_t storage; + int64_t qtime; + int32_t conns; + SSQLToken stat; +} SCreateAcctSQL; + +typedef struct tDCLSQL { + int32_t nTokens; /* Number of expressions on the list */ + int32_t nAlloc; /* Number of entries allocated below */ + SSQLToken *a; /* one entry for element */ + + union { + SCreateDBSQL dbOpt; + SCreateAcctSQL acctOpt; + }; +} tDCLSQL; + +typedef struct SSqlInfo { + int32_t sqlType; + bool validSql; + + union { + SCreateTableSQL *pCreateTableInfo; + SInsertSQL * pInsertInfo; + SAlterTableSQL * pAlterInfo; + SQuerySQL * pQueryInfo; + tDCLSQL * pDCLInfo; + }; + + char pzErrMsg[256]; +} SSqlInfo; + +typedef struct tSQLExpr { + /* + * for single operand: + * TK_ALL + * TK_ID + * TK_SUM + * TK_AVG + * TK_MIN + * TK_MAX + * TK_FIRST + * TK_LAST + * TK_BOTTOM + * TK_TOP + * TK_STDDEV + * TK_PERCENTILE + * + * for binary operand: + * TK_LESS + * TK_LARGE + * TK_EQUAL etc... + */ + uint32_t nSQLOptr; // TK_FUNCTION: sql function, TK_LE: less than(binary expr) + + // the full sql string of function(col, param), which is actually the raw + // field name, + // since the function name is kept in nSQLOptr already + SSQLToken operand; + struct tSQLExprList *pParam; // function parameters + + SSQLToken colInfo; // field id + tVariant val; // value only for string, float, int + + struct tSQLExpr *pLeft; // left child + struct tSQLExpr *pRight; // right child +} tSQLExpr; + +// used in select clause. 
select from xxx +typedef struct tSQLExprItem { + tSQLExpr *pNode; // The list of expressions + char * aliasName; // alias name, null-terminated string +} tSQLExprItem; + +typedef struct tSQLExprList { + int32_t nExpr; /* Number of expressions on the list */ + int32_t nAlloc; /* Number of entries allocated below */ + tSQLExprItem *a; /* One entry for each expression */ +} tSQLExprList; + +typedef struct tSQLExprListList { + int32_t nList; /* Number of expressions on the list */ + int32_t nAlloc; /* Number of entries allocated below */ + tSQLExprList **a; /* one entry for each row */ +} tSQLExprListList; + +#define ParseTOKENTYPE SSQLToken + +void *ParseAlloc(void *(*mallocProc)(size_t)); + +/** + * + * @param yyp The parser + * @param yymajor The major token code number + * @param yyminor The value for the token + */ +void Parse(void *yyp, int yymajor, ParseTOKENTYPE yyminor, SSqlInfo *); + +/** + * + * @param p The parser to be deleted + * @param freeProc Function used to reclaim memory + */ +void ParseFree(void *p, void (*freeProc)(void *)); + +tVariantList *tVariantListAppend(tVariantList *pList, tVariant *pVar, uint8_t sortOrder); + +tVariantList *tVariantListInsert(tVariantList *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); + +void tVariantListDestroy(tVariantList *pList); + +tFieldList *tFieldListAppend(tFieldList *pList, TAOS_FIELD *pField); + +void tFieldListDestroy(tFieldList *pList); + +tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optType); + +void tSQLExprDestroy(tSQLExpr *); + +tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SSQLToken *pToken); + +void tSQLExprListDestroy(tSQLExprList *pList); + +int32_t tSQLSyntaxNodeToString(tSQLExpr *pNode, char *dst); + +SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection, SSQLToken *pFrom, tSQLExpr *pWhere, + tVariantList *pGroupby, tVariantList *pSortOrder, SSQLToken *pInterval, + SSQLToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); + +SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName, + tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type); + +SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type); + +tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList); + +void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList); + +void destroyQuerySql(SQuerySQL *pSql); + +void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type); + +void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists); + +void SQLInfoDestroy(SSqlInfo *pInfo); + +void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...); + +tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken); + +void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBSQL *pDB, SSQLToken *pIgExists); + +void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo); + +// prefix show db.tables; +void setDBName(SSQLToken *pCpxName, SSQLToken *pDB); + +tSQLExpr *tSQLExprIdValueCreate(SSQLToken *pToken, int32_t optType); + +tSQLExpr *tSQLExprCreateFunction(tSQLExprList *pList, SSQLToken *pFuncToken, SSQLToken *endToken, int32_t optType); + +void tSQLSetColumnInfo(TAOS_FIELD *pField, SSQLToken *pName, TAOS_FIELD *pType); + +void tSQLSetColumnType(TAOS_FIELD *pField, SSQLToken 
*pToken); + +int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pSql); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/tsqldef.h b/src/inc/tsqldef.h new file mode 100644 index 000000000000..71459b81ea2f --- /dev/null +++ b/src/inc/tsqldef.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSQLDEF_H +#define TDENGINE_TSQLDEF_H + +#define TK_ID 1 +#define TK_BOOL 2 +#define TK_TINYINT 3 +#define TK_SMALLINT 4 +#define TK_INTEGER 5 +#define TK_BIGINT 6 +#define TK_FLOAT 7 +#define TK_DOUBLE 8 +#define TK_STRING 9 +#define TK_TIMESTAMP 10 +#define TK_BINARY 11 +#define TK_NCHAR 12 +#define TK_OR 13 +#define TK_AND 14 +#define TK_NOT 15 +#define TK_EQ 16 +#define TK_NE 17 +#define TK_ISNULL 18 +#define TK_NOTNULL 19 +#define TK_IS 20 +#define TK_LIKE 21 +#define TK_GLOB 22 +#define TK_BETWEEN 23 +#define TK_IN 24 +#define TK_GT 25 +#define TK_GE 26 +#define TK_LT 27 +#define TK_LE 28 +#define TK_BITAND 29 +#define TK_BITOR 30 +#define TK_LSHIFT 31 +#define TK_RSHIFT 32 +#define TK_PLUS 33 +#define TK_MINUS 34 +#define TK_DIVIDE 35 +#define TK_TIMES 36 +#define TK_STAR 37 +#define TK_SLASH 38 +#define TK_REM 39 +#define TK_CONCAT 40 +#define TK_UMINUS 41 +#define TK_UPLUS 42 +#define TK_BITNOT 43 +#define TK_SHOW 44 +#define TK_DATABASES 45 +#define TK_MNODES 46 +#define TK_DNODES 47 +#define TK_USERS 48 +#define TK_MODULES 49 +#define TK_QUERIES 50 +#define TK_CONNECTIONS 51 +#define TK_STREAMS 52 +#define TK_CONFIGS 53 +#define TK_SCORES 54 +#define TK_GRANTS 55 +#define TK_DOT 56 +#define TK_TABLES 57 +#define TK_STABLES 58 +#define TK_VGROUPS 59 +#define TK_DROP 60 +#define TK_TABLE 61 +#define TK_DATABASE 62 +#define TK_USER 63 +#define TK_USE 64 +#define TK_DESCRIBE 65 +#define TK_ALTER 66 +#define TK_PASS 67 +#define TK_PRIVILEGE 68 +#define TK_DNODE 69 +#define TK_IP 70 +#define TK_LOCAL 71 +#define TK_IF 72 +#define TK_EXISTS 73 +#define TK_CREATE 74 +#define TK_KEEP 75 +#define TK_REPLICA 76 +#define TK_DAYS 77 +#define TK_ROWS 78 +#define TK_CACHE 79 +#define TK_ABLOCKS 80 +#define TK_TBLOCKS 81 +#define TK_CTIME 82 +#define TK_CLOG 83 +#define TK_COMP 84 +#define TK_PRECISION 85 +#define TK_LP 86 +#define TK_RP 87 +#define TK_TAGS 88 +#define TK_USING 89 +#define TK_AS 90 +#define TK_COMMA 91 +#define TK_NULL 92 +#define TK_SELECT 93 +#define TK_FROM 94 +#define TK_VARIABLE 95 +#define TK_INTERVAL 96 +#define TK_FILL 97 +#define TK_SLIDING 98 +#define TK_ORDER 99 +#define TK_BY 100 +#define TK_ASC 101 +#define TK_DESC 102 +#define TK_GROUP 103 +#define TK_HAVING 104 +#define TK_LIMIT 105 +#define TK_OFFSET 106 +#define TK_SLIMIT 107 +#define TK_SOFFSET 108 +#define TK_WHERE 109 +#define TK_NOW 110 +#define TK_INSERT 111 +#define TK_INTO 112 +#define TK_VALUES 113 +#define TK_RESET 114 +#define TK_QUERY 115 +#define TK_ADD 116 +#define TK_COLUMN 117 +#define TK_TAG 118 +#define TK_CHANGE 119 +#define TK_SET 120 +#define TK_KILL 121 +#define TK_CONNECTION 122 +#define TK_COLON 123 +#define TK_STREAM 
124 +#define TK_ABORT 125 +#define TK_AFTER 126 +#define TK_ATTACH 127 +#define TK_BEFORE 128 +#define TK_BEGIN 129 +#define TK_CASCADE 130 +#define TK_CLUSTER 131 +#define TK_CONFLICT 132 +#define TK_COPY 133 +#define TK_DEFERRED 134 +#define TK_DELIMITERS 135 +#define TK_DETACH 136 +#define TK_EACH 137 +#define TK_END 138 +#define TK_EXPLAIN 139 +#define TK_FAIL 140 +#define TK_FOR 141 +#define TK_IGNORE 142 +#define TK_IMMEDIATE 143 +#define TK_INITIALLY 144 +#define TK_INSTEAD 145 +#define TK_MATCH 146 +#define TK_KEY 147 +#define TK_OF 148 +#define TK_RAISE 149 +#define TK_REPLACE 150 +#define TK_RESTRICT 151 +#define TK_ROW 152 +#define TK_STATEMENT 153 +#define TK_TRIGGER 154 +#define TK_VIEW 155 +#define TK_ALL 156 +#define TK_COUNT 157 +#define TK_SUM 158 +#define TK_AVG 159 +#define TK_MIN 160 +#define TK_MAX 161 +#define TK_FIRST 162 +#define TK_LAST 163 +#define TK_TOP 164 +#define TK_BOTTOM 165 +#define TK_STDDEV 166 +#define TK_PERCENTILE 167 +#define TK_APERCENTILE 168 +#define TK_LEASTSQUARES 169 +#define TK_HISTOGRAM 170 +#define TK_DIFF 171 +#define TK_SPREAD 172 +#define TK_WAVG 173 +#define TK_INTERP 174 +#define TK_LAST_ROW 175 +#define TK_SEMI 176 +#define TK_NONE 177 +#define TK_PREV 178 +#define TK_LINEAR 179 +#define TK_IMPORT 180 +#define TK_METRIC 181 +#define TK_TBNAME 182 +#define TK_JOIN 183 +#define TK_METRICS 184 +#define TK_STABLE 185 + +#endif \ No newline at end of file diff --git a/src/inc/tsqlfunction.h b/src/inc/tsqlfunction.h new file mode 100644 index 000000000000..7963dbe829d3 --- /dev/null +++ b/src/inc/tsqlfunction.h @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TSQLFUNCTION_H +#define TDENGINE_TSQLFUNCTION_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include "trpc.h" +#include "tsql.h" +#include "ttypes.h" + +#define TSDB_FUNC_INVALID_ID -1 +#define TSDB_FUNC_COUNT 0 +#define TSDB_FUNC_SUM 1 +#define TSDB_FUNC_AVG 2 +#define TSDB_FUNC_MIN 3 +#define TSDB_FUNC_MAX 4 +#define TSDB_FUNC_STDDEV 5 +#define TSDB_FUNC_PERCT 6 +#define TSDB_FUNC_APERCT 7 +#define TSDB_FUNC_FIRST 8 +#define TSDB_FUNC_LAST 9 +#define TSDB_FUNC_LAST_ROW 10 +#define TSDB_FUNC_LEASTSQR 11 +#define TSDB_FUNC_TOP 12 +#define TSDB_FUNC_BOTTOM 13 +#define TSDB_FUNC_SPREAD 14 +#define TSDB_FUNC_WAVG 15 +#define TSDB_FUNC_TS 16 +#define TSDB_FUNC_TS_DUMMY 17 + +#define TSDB_FUNC_TAG 18 +#define TSDB_FUNC_PRJ 19 + +#define TSDB_FUNC_TAGPRJ 20 +#define TSDB_FUNC_ARITHM 21 +#define TSDB_FUNC_DIFF 22 + +#define TSDB_FUNC_SUM_DST 23 +#define TSDB_FUNC_AVG_DST 24 +#define TSDB_FUNC_MIN_DST 25 +#define TSDB_FUNC_MAX_DST 26 + +#define TSDB_FUNC_FIRST_DST 27 +#define TSDB_FUNC_LAST_DST 28 +#define TSDB_FUNC_LAST_ROW_DST 29 +#define TSDB_FUNC_SPREAD_DST 30 + +#define TSDB_FUNC_WAVG_DST 31 +#define TSDB_FUNC_TOP_DST 32 +#define TSDB_FUNC_BOTTOM_DST 33 +#define TSDB_FUNC_APERCT_DST 34 +#define TSDB_FUNC_INTERP 35 + +#define TSDB_FUNCSTATE_SO 0x1 // single output +#define TSDB_FUNCSTATE_MO 0x2 // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM +#define TSDB_FUNCSTATE_STREAM 0x4 // function avail for stream +#define TSDB_FUNCSTATE_METRIC 0x8 // function avail for metric +#define TSDB_FUNCSTATE_OF 0x10 // outer forward +#define TSDB_FUNCSTATE_NEED_TS 0x20 + +#define TSDB_BASE_FUNC_SO TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_OF +#define TSDB_BASE_FUNC_MO TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_METRIC | TSDB_FUNCSTATE_OF + +#define TSDB_PATTERN_MATCH 0 +#define TSDB_PATTERN_NOMATCH 1 +#define TSDB_PATTERN_NOWILDCARDMATCH 2 +#define TSDB_PATTERN_STRING_MAX_LEN 20 + +#define TSDB_FUNCTIONS_NAME_MAX_LENGTH 16 +#define TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE 50 + +#define PATTERN_COMPARE_INFO_INITIALIZER \ + { '%', '_' } + +#define DATA_SET_FLAG ',' // to denote the output area has data, not null value +#define DATA_SET_FLAG_SIZE sizeof(char) + +#define QUERY_ASC_FORWARD_STEP 1 +#define QUERY_DESC_FORWARD_STEP -1 +#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSQL_SO_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP) + +enum { + MASTER_SCAN = 0x0, + SUPPLEMENTARY_SCAN = 0x1, + SECONDARY_STAGE_MERGE = 0x10, +}; + +typedef struct { + SSqlFunctionExpr *pExpr; + int32_t elemSize[TSDB_MAX_COLUMNS]; + int32_t numOfCols; + int32_t offset; + char * data[TSDB_MAX_COLUMNS]; +} SArithmeticSupport; + +typedef struct SQLPreAggVal { + bool isSet; + int32_t numOfNullPoints; + int64_t wsum; + int64_t sum; + int64_t min; + int64_t max; +} SQLPreAggVal; + +/* sql function runtime context */ +typedef struct SQLFunctionCtx { + int32_t startOffset; + int32_t size; + int32_t order; + int32_t scanFlag; + + int16_t inputType; + int16_t inputBytes; + + int16_t outputType; + int16_t outputBytes; /* size of results, determined by function and input + column data type */ + + bool hasNullValue; /* null value exist in current block */ + int32_t blockStatus; /* Indicate if data is loaded, it is first/last/internal + block. 
Only for file blocks */ + + void * aInputElemBuf; + char * aOutputBuf; /* final result output buffer, point to sdata->data */ + int64_t numOfIteratedElems; /* total scanned points in processing, used for + complex query process */ + int32_t numOfOutputElems; + + int32_t currentStage; /* record current running step, default: 0 */ + + int64_t nStartQueryTimestamp; /* timestamp range of current query when + function is executed on a specific data block + */ + tVariant intermediateBuf[4]; /* to hold intermediate result */ + + int32_t numOfParams; + tVariant param[4]; /* input parameter, current support only one element */ + int64_t *ptsList; /* additional array list */ + void * ptsOutputBuf; /* output buffer for the corresponding timestamp of each + result, e.g., top/bottom*/ + + SQLPreAggVal preAggVals; +} SQLFunctionCtx; + +typedef struct SQLAggFuncElem { + char aName[TSDB_FUNCTIONS_NAME_MAX_LENGTH]; + + uint8_t nAggIdx; /* index of function in aAggs */ + int8_t stableFuncId; /* transfer function for metric query */ + uint16_t nStatus; + + /* setup the execute environment */ + void (*init)(SQLFunctionCtx *pCtx); + + /* main execution function */ + bool (*xFunction)(SQLFunctionCtx *pCtx); + + /* filter version */ + bool (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); + + /* + * some sql function require scan data twice or more in case of no index + * existing. + * e.g., stddev, percentile[disk based process for extremely large dataset] + * @param pCtx + */ + bool (*xNextStep)(SQLFunctionCtx *pCtx); + + /* + * finalizer must be called after all xFunction has been executed to + * generated final result. Otherwise, the value in aOutputBuf is a intern + * result. + */ + void (*xFinalize)(SQLFunctionCtx *pCtx); + + void (*distMergeFunc)(SQLFunctionCtx *pCtx); + + void (*distSecondaryMergeFunc)(SQLFunctionCtx *pCtx); + + int32_t (*dataReqFunc)(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId, int32_t blockStatus); +} SQLAggFuncElem; + +typedef struct SPatternCompareInfo { + char matchAll; // symbol for match all wildcard, default: '%' + char matchOne; // symbol for match one wildcard, default: '_' +} SPatternCompareInfo; + +void function_finalize(SQLFunctionCtx *pCtx); + +void getResultInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, int16_t *len); + +int patternMatch(const char *zPattern, const char *zString, size_t size, const SPatternCompareInfo *pInfo); + +int WCSPatternMatch(const wchar_t *zPattern, const wchar_t *zString, size_t size, + const struct SPatternCompareInfo *pInfo); + +#define IS_STREAM_QUERY_VALID(x) (((x)&TSDB_FUNCSTATE_STREAM) != 0) +#define IS_MULTIOUTPUT(x) (((x)&TSDB_FUNCSTATE_MO) != 0) +#define IS_SINGLEOUTPUT(x) (((x)&TSDB_FUNCSTATE_SO) != 0) +#define IS_OUTER_FORWARD(x) (((x)&TSDB_FUNCSTATE_OF) != 0) + +/* + * the status of one block, used in metric query. 
all blocks are mixed together, + * we need the status to decide + * if one block is a first/end/inter block of one meter + */ +enum { + BLK_FILE_BLOCK = 0x1, + BLK_BLOCK_LOADED = 0x2, + BLK_CACHE_BLOCK = 0x4, // in case of cache block, block must be loaded +}; + +/* determine the real data need to calculated the result */ +enum { + BLK_DATA_NO_NEEDED = 0x0, + BLK_DATA_FILEDS_NEEDED = 0x1, + BLK_DATA_ALL_NEEDED = 0x3, +}; + +#define IS_FILE_BLOCK(x) (((x)&BLK_FILE_BLOCK) != 0) + +#define SET_FILE_BLOCK_FLAG(x) \ + do { \ + (x) &= (~BLK_CACHE_BLOCK); \ + (x) |= BLK_FILE_BLOCK; \ + } while (0); + +#define SET_CACHE_BLOCK_FLAG(x) ((x) = BLK_CACHE_BLOCK | BLK_BLOCK_LOADED); + +#define SET_DATA_BLOCK_NOT_LOADED(x) ((x) &= (~BLK_BLOCK_LOADED)); + +#define SET_DATA_BLOCK_LOADED(x) ((x) |= BLK_BLOCK_LOADED); +#define IS_DATA_BLOCK_LOADED(x) (((x)&BLK_BLOCK_LOADED) != 0) + +typedef struct SWavgRuntime { + int8_t valFlag; // flag to denote has value + int16_t type; // source data type + int64_t lastKey; + int64_t sKey; + int64_t eKey; + + union { + double dOutput; + int64_t iOutput; + }; + + union { + double dLastValue; + int64_t iLastValue; + }; +} SWavgRuntime; + +typedef struct SSumRuntime { + union { + double dOutput; + int64_t iOutput; + }; + int8_t valFlag; +} SSumRuntime; + +typedef struct SAvgRuntime { + double sum; + int64_t num; + int8_t valFlag; +} SAvgRuntime; + +/* global sql function array */ +extern struct SQLAggFuncElem aAggs[36]; + +/* compatible check array list */ +extern int32_t funcCompatList[36]; + +void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, int32_t type, int64_t *min, int64_t *max, + int64_t *sum, int64_t *wsum, int32_t *numOfNull); + +bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *minval, char *maxval); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSQLFUNCTION_H diff --git a/src/inc/tstatus.h b/src/inc/tstatus.h new file mode 100644 index 000000000000..7b2ff4a7dcf3 --- /dev/null +++ b/src/inc/tstatus.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSTATUS_H +#define TDENGINE_TSTATUS_H + +#ifdef __cplusplus +extern "C" { +#endif + +extern char *sdbDnodeStatusStr[]; +extern char *sdbDnodeBalanceStateStr[]; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TSTATUS_H diff --git a/src/inc/tstoken.h b/src/inc/tstoken.h new file mode 100644 index 000000000000..5305d2c8fac2 --- /dev/null +++ b/src/inc/tstoken.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TTOKEN_H +#define TDENGINE_TTOKEN_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +// used to denote the minimum unite in sql parsing +typedef struct SSQLToken { + uint32_t n; + uint32_t type; + char * z; +} SSQLToken; + +char *tscGetToken(char *string, char **token, int *tokenLen); +char *tscGetTokenDelimiter(char *string, char **token, int *tokenLen, char *delimiters); + +/** + * tokenizer for sql string + * @param z + * @param tokenType + * @return + */ +uint32_t tSQLGetToken(char *z, uint32_t *tokenType); + +void tStrGetToken(char *str, int32_t *i, SSQLToken *t0, bool isPrevOptr); + +bool isKeyWord(const char *z, int32_t len); +bool isNumber(const SSQLToken *pToken); + +void shiftStr(char *dst, char *src); + +uint64_t changeToTimestampWithDynPrec(SSQLToken *pToken); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTOKEN_H diff --git a/src/inc/tsystem.h b/src/inc/tsystem.h new file mode 100644 index 000000000000..263133486e83 --- /dev/null +++ b/src/inc/tsystem.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TSYSTEM_H +#define TDENGINE_TSYSTEM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +bool taosGetSysMemory(float *memoryUsedMB); + +bool taosGetProcMemory(float *memoryUsedMB); + +bool taosGetDisk(float *diskUsedGB); + +bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage); + +bool taosGetBandSpeed(float *bandSpeedKb); + +bool taosGetProcIO(float *readKB, float *writeKB); + +void taosGetSystemInfo(); + +void taosKillSystem(); + +/* + * transfer charset from non-standard format to standard format, in line with + * requirements + * of library of libiconv + * + * NOTE: user need to free the string + */ +char *taosCharsetReplace(char *charsetstr); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/ttime.h b/src/inc/ttime.h new file mode 100644 index 000000000000..4aee10e416e1 --- /dev/null +++ b/src/inc/ttime.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TTIME_H +#define TDENGINE_TTIME_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +//@return timestamp in second +int32_t taosGetTimestampSec(); + +//@return timestamp in millisecond +int64_t taosGetTimestampMs(); + +//@return timestamp in microsecond +int64_t taosGetTimestampUs(); + +/* + * @return timestamp decided by global conf variable, tsTimePrecision + * if precision == TSDB_TIME_PRECISION_MICRO, it returns timestamp in microsecond. + * precision == TSDB_TIME_PRECISION_MILLI, it returns timestamp in millisecond. + */ +int64_t taosGetTimestamp(int32_t precision); + +int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts); + +int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTIME_H diff --git a/src/inc/ttimer.h b/src/inc/ttimer.h new file mode 100644 index 000000000000..9313bf3ec02c --- /dev/null +++ b/src/inc/ttimer.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TTIMER_H +#define TDENGINE_TTIMER_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *tmr_h; + +extern int tmrDebugFlag; +extern int taosTmrThreads; + +void *taosTmrInit(int maxTmr, int resoultion, int longest, char *label); + +tmr_h taosTmrStart(void (*fp)(void *, void *), int mseconds, void *param1, void *handle); + +void taosTmrStop(tmr_h tmrId); + +void taosTmrStopA(tmr_h *timerId); + +void taosTmrReset(void (*fp)(void *, void *), int mseconds, void *param1, void *handle, tmr_h *pTmrId); + +void taosTmrCleanUp(void *handle); + +void taosTmrList(void *handle); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTIMER_H diff --git a/src/inc/ttypes.h b/src/inc/ttypes.h new file mode 100644 index 000000000000..8b31a446d30b --- /dev/null +++ b/src/inc/ttypes.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TTYPES_H +#define TDENGINE_TTYPES_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include "tstoken.h" + +// Bytes for each type. 
+#define CHAR_BYTES sizeof(char) +#define SHORT_BYTES sizeof(short) +#define INT_BYTES sizeof(int) +#define LONG_BYTES sizeof(int64_t) +#define FLOAT_BYTES sizeof(float) +#define DOUBLE_BYTES sizeof(double) + +#define POINTER_BYTES sizeof(void *) // 8 by default assert(sizeof(ptrdiff_t) == sizseof(void*) + +typedef struct tDataDescriptor { + int16_t nType; + int16_t nameLen; + int32_t nSize; + char * aName; +} tDataDescriptor; + +extern tDataDescriptor tDataTypeDesc[11]; + +bool isValidDataType(int32_t type, int32_t length); +bool isNull(const char *val, int32_t type); + +void setNull(char *val, int32_t type, int32_t bytes); +void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems); + +void assignVal(char *val, char *src, int32_t len, int32_t type); +void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size); + +// variant, each number/string/field_id has a corresponding struct during +// parsing sql +typedef struct tVariant { + uint32_t nType; + int32_t nLen; // only used for string, for number, it is useless + union { + int64_t i64Key; + double dKey; + char * pz; + wchar_t *wpz; + }; +} tVariant; + +void tVariantCreate(tVariant *pVar, SSQLToken *token); + +void tVariantCreateN(tVariant *pVar, char *pz, uint32_t len, uint32_t type); + +void tVariantCreateB(tVariant *pVar, char *pz, uint32_t len, uint32_t type); + +void tVariantDestroy(tVariant *pV); + +void tVariantAssign(tVariant *pDst, tVariant *pSrc); + +int32_t tVariantToString(tVariant *pVar, char *dst); + +int32_t tVariantDump(tVariant *pVariant, char *payload, char type); + +int32_t tVariantTypeSetType(tVariant *pVariant, char type); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTYPES_H diff --git a/src/inc/tutil.h b/src/inc/tutil.h new file mode 100644 index 000000000000..e4fa116d8125 --- /dev/null +++ b/src/inc/tutil.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TUTIL_H +#define TDENGINE_TUTIL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcrc32c.h" +#include "tsdb.h" + +#define VALIDFD(x) ((x) > 2) + +#define WCHAR wchar_t +#define tfree(x) \ + { \ + if (x) { \ + free(x); \ + x = NULL; \ + } \ + } + +#define taosCloseSocket(x) \ + { \ + if (VALIDFD(x)) { \ + close(x); \ + x = -1; \ + } \ + } +#define taosWriteSocket(fd, buf, len) write(fd, buf, len) +#define taosReadSocket(fd, buf, len) read(fd, buf, len) + +#define tclose(x) taosCloseSocket(x) + +#ifdef ASSERTION +#define ASSERT(x) assert(x) +#else +#define ASSERT(x) +#endif + +#ifdef UNUSED +#undefine UNUSED +#endif +#define UNUSED(x) ((void)(x)) + +#ifdef UNUSED_FUNC +#undefine UNUSED_FUNC +#endif + +#ifdef UNUSED_PARAM +#undef UNUSED_PARAM +#endif + +#if defined(__GNUC__) +#define UNUSED_PARAM(x) _UNUSED##x __attribute__((unused)) +#define UNUSED_FUNC __attribute__((unused)) +#else +#define UNUSED_PARAM(x) x +#define UNUSED_FUNC +#endif + +#ifdef tListLen +#undefine tListLen +#endif +#define tListLen(x) (sizeof(x) / sizeof((x)[0])) + +#if defined(__GNUC__) +#define FORCE_INLINE inline __attribute__((always_inline)) +#else +#define FORCE_INLINE +#endif + +#define DEFAULT_UNICODE_ENCODEC "UCS-4LE" + +#define SWAP(a, b) \ + do { \ + typeof(a) __tmp = (a); \ + (a) = (b); \ + (b) = __tmp; \ + } while (0) + +#define MAX(a, b) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + (__a > __b) ? __a : __b; \ + }) + +#define MIN(a, b) \ + ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + (__a < __b) ? __a : __b; \ + }) + +#define DEFAULT_COMP(x, y) \ + do { \ + if ((x) == (y)) { \ + return 0; \ + } else { \ + return (x) < (y) ? -1 : 1; \ + } \ + } while (0); + +#define GET_INT8_VAL(x) (*(int8_t *)(x)) +#define GET_INT16_VAL(x) (*(int16_t *)(x)) +#define GET_INT32_VAL(x) (*(int32_t *)(x)) +#define GET_INT64_VAL(x) (*(int64_t *)(x)) +#define GET_FLOAT_VAL(x) (*(float *)(x)) +#define GET_DOUBLE_VAL(x) (*(double *)(x)) + +#define ALIGN_NUM(n, align) (((n) + ((align)-1)) & (~((align)-1))) + +// align to 8bytes +#define ALIGN8(n) ALIGN_NUM(n, 8) + +#define MILLISECOND_PER_SECOND (1000L) + +#define MILLISECOND_PER_MINUTE (MILLISECOND_PER_SECOND * 60) +#define MILLISECOND_PER_HOUR (MILLISECOND_PER_MINUTE * 60) +#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24) +#define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7) +#define MILLISECOND_PER_MONTH (MILLISECOND_PER_DAY * 30) +#define MILLISECOND_PER_YEAR (MILLISECOND_PER_DAY * 365) + +#define POW2(x) ((x) * (x)) + +int32_t strdequote(char *src); + +void strtrim(char *src); + +char *strnchr(char *haystack, char needle, int32_t len); + +char **strsplit(char *src, const char *delim, int32_t *num); + +void strtolower(char *src, char *dst); + +int64_t strnatoi(char *num, int32_t len); + +char* strreplace(const char* str, const char* pattern, const char* rep); + +char *paGetToken(char *src, char **token, int32_t *tokenLen); + +void taosMsleep(int32_t mseconds); + +int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]); + +int32_t taosHexStrToByteArray(char hexstr[], char bytes[]); + +int64_t str2int64(char *str); + +int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstPath); + +bool taosCheckPthreadValid(pthread_t thread); + +void taosResetPthread(pthread_t *thread); + +int64_t taosGetPthreadId(); + +int32_t taosInitTimer(void *(*callback)(void *), int32_t ms); + +/** + * murmur hash 
algorithm + * @key usually string + * @len key length + * @seed hash seed + * @out an int32 value + */ +uint32_t MurmurHash3_32(const void *key, int32_t len); + +bool taosCheckDbName(char *db, char *monitordb); + +bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len); + +bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs); + +bool taosValidateEncodec(char *encodec); + +#define __sync_val_compare_and_swap_64 __sync_val_compare_and_swap +#define __sync_val_compare_and_swap_32 __sync_val_compare_and_swap +#define __sync_add_and_fetch_64 __sync_add_and_fetch +#define __sync_add_and_fetch_32 __sync_add_and_fetch + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TUTIL_H \ No newline at end of file diff --git a/src/kit/inc/shell.h b/src/kit/inc/shell.h new file mode 100644 index 000000000000..e23378b179e7 --- /dev/null +++ b/src/kit/inc/shell.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef __SHELL__ +#define __SHELL__ + +#include "stdbool.h" +#include "taos.h" +#include "tlog.h" +#include "tsdb.h" + +#define MAX_USERNAME_SIZE 64 +#define MAX_DBNAME_SIZE 64 +#define MAX_IP_SIZE 20 +#define MAX_PASSWORD_SIZE 20 +#define MAX_HISTORY_SIZE 1000 +#define MAX_COMMAND_SIZE 65536 +#define HISTORY_FILE ".taos_history" + +#define BOOL_OUTPUT_LENGTH 6 +#define TINYINT_OUTPUT_LENGTH 6 +#define SMALLINT_OUTPUT_LENGTH 7 +#define INT_OUTPUT_LENGTH 11 +#define BIGINT_OUTPUT_LENGTH 21 +#define FLOAT_OUTPUT_LENGTH 20 +#define DOUBLE_OUTPUT_LENGTH 25 +#define BINARY_OUTPUT_LENGTH 20 + +// dynamic config timestamp width according to maximum time precision +extern int32_t TIMESTAMP_OUTPUT_LENGTH; + +typedef struct History History; +struct History { + char* hist[MAX_HISTORY_SIZE]; + int hstart; + int hend; +}; + +struct arguments { + char* host; + char* password; + char* user; + char* database; + char* timezone; + bool is_raw_time; + bool is_use_passwd; + char file[TSDB_FILENAME_LEN]; + char* commands; + int abort; +}; + +/**************** Function declarations ****************/ +extern void shellParseArgument(int argc, char* argv[], struct arguments* arguments); +extern TAOS* shellInit(struct arguments* args); +extern void* shellLoopQuery(void* arg); +extern void taos_error(TAOS* con); +extern int regex_match(const char* s, const char* reg, int cflags); +void shellReadCommand(TAOS* con, char command[]); +void shellRunCommand(TAOS* con, char* command); +void shellRunCommandOnServer(TAOS* con, char command[]); +void read_history(); +void write_history(); +void source_file(TAOS* con, char* fptr); +void get_history_path(char* history); +void cleanup_handler(void* arg); +void exitShell(); +int shellDumpResult(TAOS* con, char* fname, int* error_no); +void shellPrintNChar(char* str, int width); +#define max(a, b) ((int)(a) < (int)(b) ? 
(int)(b) : (int)(a)) + +/**************** Global variable declarations ****************/ +extern char PROMPT_HEADER[]; +extern char CONTINUE_PROMPT[]; +extern int prompt_size; +extern History history; +extern struct termios oldtio; +extern void set_terminal_mode(); +extern int get_old_terminal_mode(struct termios* tio); +extern void reset_terminal_mode(); +extern struct arguments args; +extern TAOS_RES* result; + +#endif diff --git a/src/kit/inc/shellCommand.h b/src/kit/inc/shellCommand.h new file mode 100644 index 000000000000..dbf966e16539 --- /dev/null +++ b/src/kit/inc/shellCommand.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef __COMMAND_STRUCT__ +#define __COMMAND_STRUCT__ + +#include "shell.h" + +#define LEFT 1 +#define RIGHT 2 +#define UP 3 +#define DOWN 4 + +typedef struct Command Command; +struct Command { + char * buffer; + char * command; + unsigned commandSize; + unsigned bufferSize; + unsigned cursorOffset; + unsigned screenOffset; + unsigned endOffset; +}; + +extern void insertChar(Command *cmd, char *c, int size); +extern void backspaceChar(Command *cmd); +extern void deleteChar(Command *cmd); +extern void moveCursorLeft(Command *cmd); +extern void moveCursorRight(Command *cmd); +extern void positionCursorHome(Command *cmd); +extern void positionCursorEnd(Command *cmd); +extern void showOnScreen(Command *cmd); +extern void updateBuffer(Command *cmd); +extern int isReadyGo(Command *cmd); +extern void resetCommand(Command *cmd, const char s[]); + +int countPrefixOnes(char c); +void clearScreen(int ecmd_pos, int cursor_pos); +void printChar(char c, int times); +void positionCursor(int step, int direction); + +#endif diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt new file mode 100644 index 000000000000..7e0a5ca2e36c --- /dev/null +++ b/src/kit/shell/CMakeLists.txt @@ -0,0 +1,23 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +SET(SHELL_BUILD_NAME "ts") +SET(SHELL_FINAL_NAME "taos") +SET(SHELL_CMD_NAME "taos_cmd") +SET(SHELL_TARGET_NAME "taos_target") + +AUX_SOURCE_DIRECTORY(. SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ../inc ../../client/inc) + +ADD_EXECUTABLE(${SHELL_BUILD_NAME} ${SRC}) +TARGET_LINK_LIBRARIES(${SHELL_BUILD_NAME} taos_static trpc tutil pthread) + +ADD_CUSTOM_COMMAND(OUTPUT ${SHELL_CMD_NAME} + POST_BUILD + COMMAND echo "rename file" + DEPENDS ${SHELL_BUILD_NAME} + COMMAND ${CMAKE_COMMAND} -E rename ${EXECUTABLE_OUTPUT_PATH}/${SHELL_BUILD_NAME} ${EXECUTABLE_OUTPUT_PATH}/${SHELL_FINAL_NAME} + COMMENT "rename ts to taos") + +ADD_CUSTOM_TARGET(${SHELL_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${SHELL_CMD_NAME}) diff --git a/src/kit/shell/shellCommand.c b/src/kit/shell/shellCommand.c new file mode 100644 index 000000000000..72e669d8b29a --- /dev/null +++ b/src/kit/shell/shellCommand.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +#define __USE_XOPEN + +#include + +#include "shell.h" +#include "shellCommand.h" + +typedef struct { + char widthInString; + char widthOnScreen; +} UTFCodeInfo; + +int countPrefixOnes(char c) { + unsigned char mask = 127; + mask = ~mask; + int ret = 0; + while ((c & mask) != 0) { + ret++; + c <<= 1; + } + + return ret; +} + +void getPrevCharSize(const char *str, int pos, int *size, int *width) { + assert(pos > 0); + + wchar_t wc; + *size = 0; + *width = 0; + + while (--pos >= 0) { + *size += 1; + + if (str[pos] > 0 || countPrefixOnes(str[pos]) > 1) break; + } + + assert(mbtowc(&wc, str + pos, MB_CUR_MAX) == *size); + + *width = wcwidth(wc); +} + +void getNextCharSize(const char *str, int pos, int *size, int *width) { + assert(pos >= 0); + + wchar_t wc; + *size = mbtowc(&wc, str + pos, MB_CUR_MAX); + *width = wcwidth(wc); +} + +void insertChar(Command *cmd, char *c, int size) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + wchar_t wc; + if (mbtowc(&wc, c, size) < 0) return; + + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + /* update the buffer */ + memmove(cmd->command + cmd->cursorOffset + size, cmd->command + cmd->cursorOffset, + cmd->commandSize - cmd->cursorOffset); + memcpy(cmd->command + cmd->cursorOffset, c, size); + /* update the values */ + cmd->commandSize += size; + cmd->cursorOffset += size; + cmd->screenOffset += wcwidth(wc); + cmd->endOffset += wcwidth(wc); + showOnScreen(cmd); +} + +void backspaceChar(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + if (cmd->cursorOffset > 0) { + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + int size = 0; + int width = 0; + getPrevCharSize(cmd->command, cmd->cursorOffset, &size, &width); + memmove(cmd->command + cmd->cursorOffset - size, cmd->command + cmd->cursorOffset, + cmd->commandSize - cmd->cursorOffset); + cmd->commandSize -= size; + cmd->cursorOffset -= size; + cmd->screenOffset -= width; + cmd->endOffset -= width; + showOnScreen(cmd); + } +} + +void deleteChar(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + if (cmd->cursorOffset < cmd->commandSize) { + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + int size = 0; + int width = 0; + getNextCharSize(cmd->command, cmd->cursorOffset, &size, &width); + memmove(cmd->command + cmd->cursorOffset, cmd->command + cmd->cursorOffset + size, + cmd->commandSize - cmd->cursorOffset - size); + cmd->commandSize -= size; + cmd->endOffset -= width; + showOnScreen(cmd); + } +} + +void moveCursorLeft(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + if (cmd->cursorOffset > 0) { + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + int size = 0; + int width = 0; + 
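The editing helpers above (getPrevCharSize, getNextCharSize, insertChar, backspaceChar, deleteChar) keep two measures per character: its size in bytes, from mbtowc, and its width in screen columns, from wcwidth, because a UTF-8 character can span several bytes and a wide CJK character occupies two columns. A minimal standalone illustration of that distinction, assuming the process runs under a UTF-8 locale:

#define _XOPEN_SOURCE 700  /* for wcwidth */
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>

int main(void) {
  setlocale(LC_ALL, "");             /* must select a UTF-8 locale for this demo */
  const char *s = "ab\xe4\xb8\xad";  /* "ab" followed by one 3-byte CJK character */
  int pos = 0;
  while (s[pos] != '\0') {
    wchar_t wc;
    int size = mbtowc(&wc, s + pos, MB_CUR_MAX);  /* bytes consumed */
    if (size <= 0) break;
    int width = wcwidth(wc);                      /* screen columns (CJK -> 2) */
    printf("offset %d: %d byte(s), %d column(s)\n", pos, size, width);
    pos += size;
  }
  return 0;
}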
getPrevCharSize(cmd->command, cmd->cursorOffset, &size, &width); + cmd->cursorOffset -= size; + cmd->screenOffset -= width; + showOnScreen(cmd); + } +} + +void moveCursorRight(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + if (cmd->cursorOffset < cmd->commandSize) { + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + int size = 0; + int width = 0; + getNextCharSize(cmd->command, cmd->cursorOffset, &size, &width); + cmd->cursorOffset += size; + cmd->screenOffset += width; + showOnScreen(cmd); + } +} + +void positionCursorHome(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + if (cmd->cursorOffset > 0) { + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + cmd->cursorOffset = 0; + cmd->screenOffset = 0; + showOnScreen(cmd); + } +} + +void positionCursorEnd(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + if (cmd->cursorOffset < cmd->commandSize) { + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + cmd->cursorOffset = cmd->commandSize; + cmd->screenOffset = cmd->endOffset; + showOnScreen(cmd); + } +} + +void printChar(char c, int times) { + for (int i = 0; i < times; i++) { + fprintf(stdout, "%c", c); + } + fflush(stdout); +} + +void positionCursor(int step, int direction) { + if (step > 0) { + if (direction == LEFT) { + fprintf(stdout, "\033[%dD", step); + } else if (direction == RIGHT) { + fprintf(stdout, "\033[%dC", step); + } else if (direction == UP) { + fprintf(stdout, "\033[%dA", step); + } else if (direction == DOWN) { + fprintf(stdout, "\033[%dB", step); + } + fflush(stdout); + } +} + +void updateBuffer(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + if (regex_match(cmd->buffer, "(\\s+$)|(^$)", REG_EXTENDED)) strcat(cmd->command, " "); + strcat(cmd->buffer, cmd->command); + cmd->bufferSize += cmd->commandSize; + + memset(cmd->command, 0, MAX_COMMAND_SIZE); + cmd->cursorOffset = 0; + cmd->screenOffset = 0; + cmd->commandSize = 0; + cmd->endOffset = 0; + showOnScreen(cmd); +} + +int isReadyGo(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + char *total = (char *)calloc(1, MAX_COMMAND_SIZE); + memset(cmd->command + cmd->commandSize, 0, MAX_COMMAND_SIZE - cmd->commandSize); + sprintf(total, "%s%s", cmd->buffer, cmd->command); + + char *reg_str = + "(^.*;\\s*$)|(^\\s*$)|(^\\s*exit\\s*$)|(^\\s*q\\s*$)|(^\\s*quit\\s*$)|(^" + "\\s*clear\\s*$)"; + if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) { + free(total); + return 1; + } + + free(total); + return 0; +} + +void getMbSizeInfo(const char *str, int *size, int *width) { + wchar_t *wc = (wchar_t *)calloc(sizeof(wchar_t), MAX_COMMAND_SIZE); + *size = strlen(str); + mbstowcs(wc, str, MAX_COMMAND_SIZE); + *width = wcswidth(wc, MAX_COMMAND_SIZE); + free(wc); +} + +void resetCommand(Command *cmd, const char s[]) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + memset(cmd->buffer, 0, MAX_COMMAND_SIZE); + memset(cmd->command, 0, MAX_COMMAND_SIZE); + strcpy(cmd->command, s); + int size = 0; + int width = 0; + getMbSizeInfo(s, &size, &width); + cmd->bufferSize = 0; + cmd->commandSize = size; + cmd->cursorOffset = size; + cmd->screenOffset = 
width; + cmd->endOffset = width; + showOnScreen(cmd); +} diff --git a/src/kit/shell/shellEngine.c b/src/kit/shell/shellEngine.c new file mode 100644 index 000000000000..dc7ae0994524 --- /dev/null +++ b/src/kit/shell/shellEngine.c @@ -0,0 +1,642 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _XOPEN_SOURCE +#define _BSD_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "shell.h" +#include "shellCommand.h" +#include "ttime.h" +#include "tutil.h" + +/**************** Global variables ****************/ +char VERSION_INFO[] = + "Welcome to the TDengine shell, server version:%s client version:%s\n" + "Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; +char PROMPT_HEADER[] = "taos> "; +char CONTINUE_PROMPT[] = " -> "; +int prompt_size = 6; +TAOS_RES *result = NULL; +History history; + +/* + * FUNCTION: Initialize the shell. + */ +TAOS *shellInit(struct arguments *args) { + // set options before initializing + if (args->timezone != NULL) { + taos_options(TSDB_OPTION_TIMEZONE, args->timezone); + } + + if (args->is_use_passwd) { + args->password = getpass("Enter password: "); + } else { + args->password = tsDefaultPass; + } + + if (args->user == NULL) { + args->user = tsDefaultUser; + } + + taos_init(); + /* + * set tsMetricMetaKeepTimer = 3000ms + * set tsMeterMetaKeepTimer = 3000ms + * means not save cache in shell + */ + tsMetricMetaKeepTimer = 3; + tsMeterMetaKeepTimer = 3000; + + // Connect to the database. 
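shellInit connects next and then loads the saved history. The History struct declared in shell.h is used as a fixed-size ring: hend points at the next free slot, hstart at the oldest entry, and both advance modulo MAX_HISTORY_SIZE so the oldest command is dropped once the ring fills (the same arithmetic recurs in shellRunCommand, read_history and write_history below). A standalone sketch of that append logic, with a hypothetical Ring type and a tiny capacity chosen only for illustration:

#define _DEFAULT_SOURCE  /* strdup */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RING_SIZE 4  /* stand-in for MAX_HISTORY_SIZE */

typedef struct {
  char *slot[RING_SIZE];
  int start;  /* oldest entry */
  int end;    /* next free slot */
} Ring;

static void ringAppend(Ring *r, const char *line) {
  free(r->slot[r->end]);              /* slot may still hold a dropped entry */
  r->slot[r->end] = strdup(line);
  r->end = (r->end + 1) % RING_SIZE;
  if (r->end == r->start) {           /* ring full: drop the oldest entry */
    r->start = (r->start + 1) % RING_SIZE;
  }
}

int main(void) {
  Ring r = {{0}, 0, 0};
  const char *cmds[] = {"show databases;", "use test;", "select 1;", "select 2;", "select 3;"};
  for (int i = 0; i < 5; i++) ringAppend(&r, cmds[i]);
  for (int i = r.start; i != r.end; i = (i + 1) % RING_SIZE) printf("%s\n", r.slot[i]);
  for (int i = 0; i < RING_SIZE; i++) free(r.slot[i]);
  return 0;
}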
+ TAOS *con = taos_connect(args->host, args->user, args->password, args->database, tsMgmtShellPort); + if (con == NULL) { + return con; + } + + /* Read history TODO : release resources here*/ + read_history(); + + // Check if it is temperory run + if (args->commands != NULL || args->file[0] != 0) { + if (args->commands != NULL) { + char *token; + token = strtok(args->commands, ";"); + while (token != NULL) { + printf("%s%s\n", PROMPT_HEADER, token); + shellRunCommand(con, token); + token = strtok(NULL, ";"); + } + } + + if (args->file[0] != 0) { + source_file(con, args->file); + } + taos_close(con); + + write_history(); + exit(EXIT_SUCCESS); + } + + printf("\n"); + printf(VERSION_INFO, taos_get_server_info(con), taos_get_client_info()); + + return con; +} + +void shellReplaceCtrlChar(char *str) { + _Bool ctrlOn = false; + char *pstr = NULL; + + for (pstr = str; *str != '\0'; ++str) { + if (ctrlOn) { + switch (*str) { + case 'n': + *pstr = '\n'; + pstr++; + break; + case 'r': + *pstr = '\r'; + pstr++; + break; + case 't': + *pstr = '\t'; + pstr++; + break; + case '\\': + *pstr = '\\'; + pstr++; + break; + default: + break; + } + ctrlOn = false; + } else { + if (*str == '\\') { + ctrlOn = true; + } else { + *pstr = *str; + pstr++; + } + } + } + *pstr = '\0'; +} + +void shellRunCommand(TAOS *con, char *command) { + /* If command is empty just return */ + if (regex_match(command, "^[ \t;]*$", REG_EXTENDED)) { + return; + } + + /* Update the history vector. */ + if (history.hstart == history.hend || + history.hist[(history.hend + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE] == NULL || + strcmp(command, history.hist[(history.hend + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE]) != 0) { + if (history.hist[history.hend] != NULL) { + tfree(history.hist[history.hend]); + } + history.hist[history.hend] = strdup(command); + + history.hend = (history.hend + 1) % MAX_HISTORY_SIZE; + if (history.hend == history.hstart) { + history.hstart = (history.hstart + 1) % MAX_HISTORY_SIZE; + } + } + + shellReplaceCtrlChar(command); + + // Analyse the command. + if (regex_match(command, "^[ \t]*(quit|q|exit)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { + taos_close(con); + write_history(); + exitShell(); + } else if (regex_match(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) { + // If clear the screen. + system("clear"); + return; + } else if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) { + /* If source file. 
*/ + char *c_ptr = strtok(command, " ;"); + assert(c_ptr != NULL); + c_ptr = strtok(NULL, " ;"); + assert(c_ptr != NULL); + + source_file(con, c_ptr); + } else { + shellRunCommandOnServer(con, command); + } +} + +void shellRunCommandOnServer(TAOS *con, char command[]) { + int64_t st, et; + wordexp_t full_path; + char *sptr = NULL; + char *cptr = NULL; + char *fname = NULL; + + if ((sptr = strstr(command, ">>")) != NULL) { + cptr = strstr(command, ";"); + if (cptr != NULL) { + *cptr = '\0'; + } + + if (wordexp(sptr + 2, &full_path, 0) != 0) { + fprintf(stderr, "ERROR: invalid filename: %s\n", sptr + 2); + return; + } + *sptr = '\0'; + fname = full_path.we_wordv[0]; + } + + st = taosGetTimestampUs(); + + if (taos_query(con, command)) { + taos_error(con); + return; + } + + if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { + fprintf(stdout, "Database changed.\n\n"); + fflush(stdout); + return; + } + + int num_fields = taos_field_count(con); + if (num_fields != 0) { // select and show kinds of commands + int error_no = 0; + int numOfRows = shellDumpResult(con, fname, &error_no); + if (numOfRows < 0) return; + + et = taosGetTimestampUs(); + if (error_no == 0) { + printf("Query OK, %d row(s) in set (%.6fs)\n", numOfRows, (et - st) / 1E6); + } else { + printf("Query interrupted (%s), %d row(s) in set (%.6fs)\n", taos_errstr(con), numOfRows, (et - st) / 1E6); + } + } else { + int num_rows_affacted = taos_affected_rows(con); + et = taosGetTimestampUs(); + printf("Query OK, %d row(s) affected (%.6fs)\n", num_rows_affacted, (et - st) / 1E6); + } + + printf("\n"); + + if (fname != NULL) { + wordfree(&full_path); + } + return; +} + +/* Function to do regular expression check */ +int regex_match(const char *s, const char *reg, int cflags) { + regex_t regex; + char msgbuf[100]; + + /* Compile regular expression */ + if (regcomp(®ex, reg, cflags) != 0) { + fprintf(stderr, "Fail to compile regex"); + exitShell(); + } + + /* Execute regular expression */ + int reti = regexec(®ex, s, 0, NULL, 0); + if (!reti) { + regfree(®ex); + return 1; + } else if (reti == REG_NOMATCH) { + regfree(®ex); + return 0; + } else { + regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); + fprintf(stderr, "Regex match failed: %s\n", msgbuf); + regfree(®ex); + exitShell(); + } + + return 0; +} + +int shellDumpResult(TAOS *con, char *fname, int *error_no) { + TAOS_ROW row = NULL; + int numOfRows = 0; + time_t tt; + char buf[25] = "\0"; + struct tm *ptm; + int output_bytes = 0; + FILE *fp = NULL; + int num_fields = taos_field_count(con); + wordexp_t full_path; + + assert(num_fields != 0); + + result = taos_use_result(con); + if (result == NULL) { + taos_error(con); + return -1; + } + + if (fname != NULL) { + if (wordexp(fname, &full_path, 0) != 0) { + fprintf(stderr, "ERROR: invalid file name: %s\n", fname); + return -1; + } + + fp = fopen(full_path.we_wordv[0], "w"); + if (fp == NULL) { + fprintf(stderr, "ERROR: failed to open file: %s\n", full_path.we_wordv[0]); + wordfree(&full_path); + return -1; + } + + wordfree(&full_path); + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + row = taos_fetch_row(result); + char t_str[TSDB_MAX_BYTES_PER_ROW] = "\0"; + int l[TSDB_MAX_COLUMNS] = {0}; + + if (row) { + // Print the header indicator + if (fname == NULL) { // print to standard output + for (int col = 0; col < num_fields; col++) { + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + l[col] = max(BOOL_OUTPUT_LENGTH, strlen(fields[col].name)); + break; + case TSDB_DATA_TYPE_TINYINT: + 
l[col] = max(TINYINT_OUTPUT_LENGTH, strlen(fields[col].name)); + break; + case TSDB_DATA_TYPE_SMALLINT: + l[col] = max(SMALLINT_OUTPUT_LENGTH, strlen(fields[col].name)); + break; + case TSDB_DATA_TYPE_INT: + l[col] = max(INT_OUTPUT_LENGTH, strlen(fields[col].name)); + break; + case TSDB_DATA_TYPE_BIGINT: + l[col] = max(BIGINT_OUTPUT_LENGTH, strlen(fields[col].name)); + break; + case TSDB_DATA_TYPE_FLOAT: + l[col] = max(FLOAT_OUTPUT_LENGTH, strlen(fields[col].name)); + break; + case TSDB_DATA_TYPE_DOUBLE: + l[col] = max(DOUBLE_OUTPUT_LENGTH, strlen(fields[col].name)); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + l[col] = max(fields[col].bytes, strlen(fields[col].name)); + /* l[col] = max(BINARY_OUTPUT_LENGTH, strlen(fields[col].name)); */ + break; + case TSDB_DATA_TYPE_TIMESTAMP: { + int32_t defaultWidth = TIMESTAMP_OUTPUT_LENGTH; + if (args.is_raw_time) { + defaultWidth = 14; + } + if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { + defaultWidth += 3; + } + l[col] = max(defaultWidth, strlen(fields[col].name)); + + break; + } + default: + break; + } + + int spaces = (int)(l[col] - strlen(fields[col].name)); + int left_space = spaces / 2; + int right_space = (spaces % 2 ? left_space + 1 : left_space); + printf("%*.s%s%*.s|", left_space, " ", fields[col].name, right_space, " "); + output_bytes += (l[col] + 1); + } + printf("\n"); + for (int k = 0; k < output_bytes; k++) printf("="); + printf("\n"); + + // print the elements + do { + for (int i = 0; i < num_fields; i++) { + if (row[i] == NULL) { + printf("%*s|", l[i], TSDB_DATA_NULL_STR); + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_BOOL: + printf("%*s|", l[i], ((((int)(*((char *)row[i]))) == 1) ? "true" : "false")); + break; + case TSDB_DATA_TYPE_TINYINT: + printf("%*d|", l[i], (int)(*((char *)row[i]))); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf("%*d|", l[i], (int)(*((short *)row[i]))); + break; + case TSDB_DATA_TYPE_INT: + printf("%*d|", l[i], *((int *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + printf("%*ld|", l[i], *((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + printf("%*.5f|", l[i], *((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf("%*.9f|", l[i], *((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memset(t_str, 0, TSDB_MAX_BYTES_PER_ROW); + memcpy(t_str, row[i], fields[i].bytes); + /* printf("%-*s|",max(fields[i].bytes, strlen(fields[i].name)), + * t_str); */ + /* printf("%-*s|", l[i], t_str); */ + shellPrintNChar(t_str, l[i]); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (args.is_raw_time) { + printf(" %ld|", *(int64_t *)row[i]); + } else { + if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { + tt = *(time_t *)row[i] / 1000000; + } else { + tt = *(time_t *)row[i] / 1000; + } + + ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + + if (taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO) { + printf(" %s.%06d|", buf, (int)(*(time_t *)row[i] % 1000000)); + } else { + printf(" %s.%03d|", buf, (int)(*(time_t *)row[i] % 1000)); + } + } + break; + default: + break; + } + } + + printf("\n"); + numOfRows++; + } while ((row = taos_fetch_row(result))); + + } else { // dump to file + do { + for (int i = 0; i < num_fields; i++) { + if (row[i]) { + switch (fields[i].type) { + case TSDB_DATA_TYPE_BOOL: + fprintf(fp, "%d", ((((int)(*((char *)row[i]))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + fprintf(fp, "%d", (int)(*((char *)row[i]))); + break; + case TSDB_DATA_TYPE_SMALLINT: + fprintf(fp, "%d", (int)(*((short *)row[i]))); + break; + case TSDB_DATA_TYPE_INT: + fprintf(fp, "%d", *((int *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + fprintf(fp, "%ld", *((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + fprintf(fp, "%.5f", *((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + fprintf(fp, "%.9f", *((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memset(t_str, 0, TSDB_MAX_BYTES_PER_ROW); + memcpy(t_str, row[i], fields[i].bytes); + fprintf(fp, "%s", t_str); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + fprintf(fp, "%ld", *(int64_t *)row[i]); + break; + default: + break; + } + } else { + fprintf(fp, "%s", TSDB_DATA_NULL_STR); + } + if (i < num_fields - 1) { + fprintf(fp, ","); + } else { + fprintf(fp, "\n"); + } + } + + numOfRows++; + } while ((row = taos_fetch_row(result))); + } + } + + *error_no = taos_errno(con); + + taos_free_result(result); + result = NULL; + + if (fname != NULL) { + fclose(fp); + } + + return numOfRows; +} + +void read_history() { + // Initialize history + memset(history.hist, 0, sizeof(char *) * MAX_HISTORY_SIZE); + history.hstart = 0; + history.hend = 0; + char *line = NULL; + size_t line_size = 0; + int read_size = 0; + + char f_history[TSDB_FILENAME_LEN]; + get_history_path(f_history); + + if (access(f_history, R_OK) == -1) { + return; + } + + FILE *f = fopen(f_history, "r"); + if (f == NULL) { + fprintf(stderr, "Opening file %s\n", f_history); + return; + } + + while ((read_size = getline(&line, &line_size, f)) != -1) { + line[read_size - 1] = '\0'; + history.hist[history.hend] = strdup(line); + + history.hend = (history.hend + 1) % MAX_HISTORY_SIZE; + + if (history.hend == history.hstart) { + history.hstart = (history.hstart + 1) % MAX_HISTORY_SIZE; + } + } + + free(line); + fclose(f); +} + +void write_history() { + char f_history[128]; + get_history_path(f_history); + + FILE *f = fopen(f_history, "w"); + if (f == NULL) { + fprintf(stderr, "Opening file %s\n", f_history); + return; + } + + for (int i = history.hstart; i != history.hend;) { + if (history.hist[i] != NULL) { + fprintf(f, "%s\n", history.hist[i]); + tfree(history.hist[i]); + } + i = (i + 1) % MAX_HISTORY_SIZE; + } + fclose(f); +} + +void taos_error(TAOS *con) { + fprintf(stderr, "TSDB error: %s\n", taos_errstr(con)); + + /* free local resouce: allocated memory/metric-meta refcnt */ + TAOS_RES *pRes = taos_use_result(con); + taos_free_result(pRes); +} + +static int isCommentLine(char *line) { + if (line == NULL) return 1; + + return regex_match(line, "^\\s*#.*", REG_EXTENDED); +} + +void source_file(TAOS *con, char *fptr) { + wordexp_t full_path; + int read_len = 0; + char *cmd = malloc(MAX_COMMAND_SIZE); + size_t cmd_len = 0; + char *line = NULL; + size_t line_len = 0; + + if (wordexp(fptr, &full_path, 0) != 0) { + fprintf(stderr, "ERROR: illegal file name\n"); + return; + } + + char *fname = full_path.we_wordv[0]; + + if (access(fname, R_OK) == -1) { + fprintf(stderr, "ERROR: file %s is not readable\n", fptr); + wordfree(&full_path); + return; + } + + FILE *f = fopen(fname, "r"); + if (f == NULL) { + fprintf(stderr, "ERROR: failed to open file %s\n", fname); + wordfree(&full_path); + return; + } + + while ((read_len = getline(&line, &line_len, f)) != -1) { + line[--read_len] = '\0'; + + if (read_len == 0 || isCommentLine(line)) { // line starts with # + continue; + } + + if 
(line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } + + memcpy(cmd + cmd_len, line, read_len); + printf("%s%s\n", PROMPT_HEADER, cmd); + shellRunCommand(con, cmd); + memset(cmd, 0, MAX_COMMAND_SIZE); + cmd_len = 0; + } + + free(cmd); + if (line) free(line); + wordfree(&full_path); + fclose(f); +} diff --git a/src/kit/shell/shellLinux.c b/src/kit/shell/shellLinux.c new file mode 100644 index 000000000000..6602ac742e87 --- /dev/null +++ b/src/kit/shell/shellLinux.c @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define __USE_XOPEN + +#include + +#include "shell.h" +#include "shellCommand.h" +#include "tkey.h" + +#define OPT_ABORT 1 /* �Cabort */ + +int indicator = 1; +struct termios oldtio; + +const char *argp_program_version = version; +const char *argp_program_bug_address = ""; +static char doc[] = ""; +static char args_doc[] = ""; +static struct argp_option options[] = { + {"host", 'h', "HOST", 0, "TDEngine server IP address to connect. The default host is localhost."}, + {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, + {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, + {"user", 'u', "USER", 0, "The TDEngine user name to use when connecting to the server."}, + {"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."}, + {"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."}, + {"raw-time", 'r', 0, 0, "Output time as unsigned long."}, + {"file", 'f', "FILE", 0, "Script to run without enter the shell."}, + {"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."}, + {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."}, + {0}}; + +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + /* Get the input argument from argp_parse, which we + know is a pointer to our arguments structure. 
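The option table above and the parse_opt callback that follows use glibc's argp: each argp_option row declares a key, an optional argument name and a help string, and argp_parse hands every recognized key to the callback together with the caller's arguments struct via state->input. A minimal self-contained version of that flow, with hypothetical option names:

#include <argp.h>
#include <stdio.h>

struct demo_args { const char *host; int raw; };

static struct argp_option demo_options[] = {
  {"host", 'h', "HOST", 0, "Server address to connect to."},
  {"raw-time", 'r', 0, 0, "Output time as a raw number."},
  {0}};

static error_t demo_parse(int key, char *arg, struct argp_state *state) {
  struct demo_args *a = state->input;  /* same trick as parse_opt here: input carries the struct */
  switch (key) {
    case 'h': a->host = arg; break;
    case 'r': a->raw = 1; break;
    default: return ARGP_ERR_UNKNOWN;
  }
  return 0;
}

static struct argp demo_argp = {demo_options, demo_parse, 0, 0};

int main(int argc, char **argv) {
  struct demo_args a = {"localhost", 0};
  argp_parse(&demo_argp, argc, argv, 0, 0, &a);
  printf("host=%s raw=%d\n", a.host, a.raw);
  return 0;
}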
*/ + struct arguments *arguments = state->input; + wordexp_t full_path; + + switch (key) { + case 'h': + arguments->host = arg; + break; + case 'p': + if (arg) + arguments->password = arg; + else + arguments->is_use_passwd = true; + break; + case 'P': + tsMgmtShellPort = atoi(arg); + break; + case 't': + arguments->timezone = arg; + break; + case 'u': + arguments->user = arg; + break; + case 'c': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + strcpy(configDir, full_path.we_wordv[0]); + wordfree(&full_path); + break; + case 's': + arguments->commands = arg; + break; + case 'r': + arguments->is_raw_time = true; + break; + case 'f': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + strcpy(arguments->file, full_path.we_wordv[0]); + wordfree(&full_path); + break; + case 'd': + arguments->database = arg; + break; + case OPT_ABORT: + arguments->abort = 1; + break; + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +/* Our argp parser. */ +static struct argp argp = {options, parse_opt, args_doc, doc}; + +void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { + argp_parse(&argp, argc, argv, 0, 0, arguments); + if (arguments->abort) { + error(10, 0, "ABORTED"); + } +} + +void shellReadCommand(TAOS *con, char *command) { + unsigned hist_counter = history.hend; + char utf8_array[10] = "\0"; + Command cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.buffer = (char *)calloc(1, MAX_COMMAND_SIZE); + cmd.command = (char *)calloc(1, MAX_COMMAND_SIZE); + showOnScreen(&cmd); + + // Read input. + char c; + while (1) { + c = getchar(); + + if (c < 0) { // For UTF-8 + int count = countPrefixOnes(c); + utf8_array[0] = c; + for (int k = 1; k < count; k++) { + c = getchar(); + utf8_array[k] = c; + } + insertChar(&cmd, utf8_array, count); + } else if (c < '\033') { + // Ctrl keys. TODO: Implement ctrl combinations + switch (c) { + case 1: // ctrl A + positionCursorHome(&cmd); + break; + case 3: + printf("\n"); + resetCommand(&cmd, ""); + kill(0, SIGINT); + break; + case 4: // EOF or Ctrl+D + printf("\n"); + taos_close(con); + // write the history + write_history(); + exitShell(); + break; + case 5: // ctrl E + positionCursorEnd(&cmd); + break; + case 8: + backspaceChar(&cmd); + break; + case '\n': + case '\r': + printf("\n"); + if (isReadyGo(&cmd)) { + sprintf(command, "%s%s", cmd.buffer, cmd.command); + tfree(cmd.buffer); + tfree(cmd.command); + return; + } else { + updateBuffer(&cmd); + } + break; + case 12: // Ctrl + L; + system("clear"); + showOnScreen(&cmd); + break; + } + } else if (c == '\033') { + c = getchar(); + switch (c) { + case '[': + c = getchar(); + switch (c) { + case 'A': // Up arrow + if (hist_counter != history.hstart) { + hist_counter = (hist_counter + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE; + resetCommand(&cmd, (history.hist[hist_counter] == NULL) ? "" : history.hist[hist_counter]); + } + break; + case 'B': // Down arrow + if (hist_counter != history.hend) { + int next_hist = (hist_counter + 1) % MAX_HISTORY_SIZE; + + if (next_hist != history.hend) { + resetCommand(&cmd, (history.hist[next_hist] == NULL) ? 
"" : history.hist[next_hist]); + } else { + resetCommand(&cmd, ""); + } + hist_counter = next_hist; + } + break; + case 'C': // Right arrow + moveCursorRight(&cmd); + break; + case 'D': // Left arrow + moveCursorLeft(&cmd); + break; + case '1': + if ((c = getchar()) == '~') { + // Home key + positionCursorHome(&cmd); + } + break; + case '2': + if ((c = getchar()) == '~') { + // Insert key + } + break; + case '3': + if ((c = getchar()) == '~') { + // Delete key + deleteChar(&cmd); + } + break; + case '4': + if ((c = getchar()) == '~') { + // End key + positionCursorEnd(&cmd); + } + break; + case '5': + if ((c = getchar()) == '~') { + // Page up key + } + break; + case '6': + if ((c = getchar()) == '~') { + // Page down key + } + break; + case 72: + // Home key + positionCursorHome(&cmd); + break; + case 70: + // End key + positionCursorEnd(&cmd); + break; + } + break; + } + } else if (c == 0x7f) { + // press delete key + backspaceChar(&cmd); + } else { + insertChar(&cmd, &c, 1); + } + } +} + +void *shellLoopQuery(void *arg) { + if (indicator) { + get_old_terminal_mode(&oldtio); + indicator = 0; + } + + TAOS *con = (TAOS *)arg; + + pthread_cleanup_push(cleanup_handler, NULL); + + char *command = malloc(MAX_COMMAND_SIZE); + + while (1) { + // Read command from shell. + + memset(command, 0, MAX_COMMAND_SIZE); + set_terminal_mode(); + shellReadCommand(con, command); + reset_terminal_mode(); + + if (command != NULL) { + // Run the command + shellRunCommand(con, command); + } + } + + pthread_cleanup_pop(1); + + return NULL; +} + +void shellPrintNChar(char *str, int width) { + int col_left = width; + wchar_t wc; + while (col_left > 0) { + if (*str == '\0') break; + char *tstr = str; + int byte_width = mbtowc(&wc, tstr, MB_CUR_MAX); + int col_width = wcwidth(wc); + if (col_left < col_width) break; + printf("%lc", wc); + str += byte_width; + col_left -= col_width; + } + + while (col_left > 0) { + printf(" "); + col_left--; + } + printf("|"); +} + +int get_old_terminal_mode(struct termios *tio) { + /* Make sure stdin is a terminal. */ + if (!isatty(STDIN_FILENO)) { + return -1; + } + + // Get the parameter of current terminal + if (tcgetattr(0, &oldtio) != 0) { + return -1; + } + + return 1; +} + +void reset_terminal_mode() { + if (tcsetattr(0, TCSANOW, &oldtio) != 0) { + fprintf(stderr, "Fail to reset the terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void set_terminal_mode() { + struct termios newtio; + + /* if (atexit(reset_terminal_mode) != 0) { */ + /* fprintf(stderr, "Error register exit function!\n"); */ + /* exit(EXIT_FAILURE); */ + /* } */ + + memcpy(&newtio, &oldtio, sizeof(oldtio)); + + // Set new terminal attributes. 
+ newtio.c_iflag &= ~(IXON | IXOFF | ICRNL | INLCR | IGNCR | IMAXBEL | ISTRIP); + newtio.c_iflag |= IGNBRK; + + // newtio.c_oflag &= ~(OPOST|ONLCR|OCRNL|ONLRET); + newtio.c_oflag |= OPOST; + newtio.c_oflag |= ONLCR; + newtio.c_oflag &= ~(OCRNL | ONLRET); + + newtio.c_lflag &= ~(IEXTEN | ICANON | ECHO | ECHOE | ECHONL | ECHOCTL | ECHOPRT | ECHOKE | ISIG); + newtio.c_cc[VMIN] = 1; + newtio.c_cc[VTIME] = 0; + + if (tcsetattr(0, TCSANOW, &newtio) != 0) { + fprintf(stderr, "Fail to set terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void get_history_path(char *history) { sprintf(history, "%s/%s", getpwuid(getuid())->pw_dir, HISTORY_FILE); } + +void clearScreen(int ecmd_pos, int cursor_pos) { + struct winsize w; + ioctl(0, TIOCGWINSZ, &w); + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + positionCursor(cursor_y, LEFT); + positionCursor(command_x - cursor_x, DOWN); + fprintf(stdout, "\033[2K"); + for (int i = 0; i < command_x; i++) { + positionCursor(1, UP); + fprintf(stdout, "\033[2K"); + } + fflush(stdout); +} + +void showOnScreen(Command *cmd) { + struct winsize w; + if (ioctl(0, TIOCGWINSZ, &w) < 0 || w.ws_col == 0 || w.ws_row == 0) { + fprintf(stderr, "No stream device\n"); + exit(EXIT_FAILURE); + } + + wchar_t wc; + int size = 0; + + // Print out the command. + char *total_string = malloc(MAX_COMMAND_SIZE); + memset(total_string, '\0', MAX_COMMAND_SIZE); + if (strcmp(cmd->buffer, "") == 0) { + sprintf(total_string, "%s%s", PROMPT_HEADER, cmd->command); + } else { + sprintf(total_string, "%s%s", CONTINUE_PROMPT, cmd->command); + } + + int remain_column = w.ws_col; + /* size = cmd->commandSize + prompt_size; */ + for (char *str = total_string; size < cmd->commandSize + prompt_size;) { + int ret = mbtowc(&wc, str, MB_CUR_MAX); + if (ret < 0) break; + size += ret; + /* assert(size >= 0); */ + int width = wcwidth(wc); + if (remain_column > width) { + printf("%lc", wc); + remain_column -= width; + } else { + if (remain_column == width) { + printf("%lc\n\r", wc); + remain_column = w.ws_col; + } else { + printf("\n\r%lc", wc); + remain_column = w.ws_col - width; + } + } + + str = total_string + size; + } + + free(total_string); + /* for (int i = 0; i < size; i++){ */ + /* char c = total_string[i]; */ + /* if (k % w.ws_col == 0) { */ + /* printf("%c\n\r", c); */ + /* } */ + /* else { */ + /* printf("%c", c); */ + /* } */ + /* k += 1; */ + /* } */ + + // Position the cursor + int cursor_pos = cmd->screenOffset + prompt_size; + int ecmd_pos = cmd->endOffset + prompt_size; + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + // int cursor_y = cursor % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + int command_y = ecmd_pos % w.ws_col; + // int command_y = (command.size() + prompt_size) % w.ws_col; + positionCursor(command_y, LEFT); + positionCursor(command_x, UP); + positionCursor(cursor_x, DOWN); + positionCursor(cursor_y, RIGHT); + fflush(stdout); +} + +void cleanup_handler(void *arg) { tcsetattr(0, TCSANOW, &oldtio); } + +void exitShell() { + tcsetattr(0, TCSANOW, &oldtio); + exit(EXIT_SUCCESS); +} diff --git a/src/kit/shell/shellMain.c b/src/kit/shell/shellMain.c new file mode 100644 index 000000000000..7c71369f95aa --- /dev/null +++ b/src/kit/shell/shellMain.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "shell.h" +#include "tsclient.h" +#include "tutil.h" + +TAOS* con; +pthread_t pid; +int32_t TIMESTAMP_OUTPUT_LENGTH = 22; + +// TODO: IMPLEMENT INTERRUPT HANDLER. +void interruptHandler(int signum) { + TAOS_RES* res = taos_use_result(con); + taos_stop_query(res); + if (res != NULL) { + /* + * we need to free result in async model, in order to avoid free + * results while the master thread is waiting for server response. + */ + tscQueueAsyncFreeResult(res); + } + result = NULL; +} + +int checkVersion() { + if (sizeof(int8_t) != 1) { + printf("taos int8 size is %d(!= 1)", (int)sizeof(int8_t)); + return 0; + } + if (sizeof(int16_t) != 2) { + printf("taos int16 size is %d(!= 2)", (int)sizeof(int16_t)); + return 0; + } + if (sizeof(int32_t) != 4) { + printf("taos int32 size is %d(!= 4)", (int)sizeof(int32_t)); + return 0; + } + if (sizeof(int64_t) != 8) { + printf("taos int64 size is %d(!= 8)", (int)sizeof(int64_t)); + return 0; + } + return 1; +} + +// Global configurations +struct arguments args = {NULL, NULL, NULL, NULL, NULL, false, false, "\0", NULL}; + +/* + * Main function. + */ +int main(int argc, char* argv[]) { + /*setlocale(LC_ALL, "en_US.UTF-8"); */ + + if (!checkVersion()) { + exit(EXIT_FAILURE); + } + + shellParseArgument(argc, argv, &args); + + /* Initialize the shell */ + con = shellInit(&args); + if (con == NULL) { + taos_error(con); + exit(EXIT_FAILURE); + } + + /* Interupt handler. */ + struct sigaction act; + act.sa_handler = interruptHandler; + sigaction(SIGTERM, &act, NULL); + sigaction(SIGINT, &act, NULL); + + /* Loop to query the input. */ + while (1) { + pthread_create(&pid, NULL, shellLoopQuery, con); + pthread_join(pid, NULL); + } + return 0; +} diff --git a/src/kit/taosBenchmark/CMakeLists.txt b/src/kit/taosBenchmark/CMakeLists.txt new file mode 100644 index 000000000000..4b6cf8c64b96 --- /dev/null +++ b/src/kit/taosBenchmark/CMakeLists.txt @@ -0,0 +1,9 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(. SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ../../client/inc) + +ADD_EXECUTABLE(taosBenchmark ${SRC}) +TARGET_LINK_LIBRARIES(taosBenchmark taos_static trpc tutil pthread) diff --git a/src/kit/taosBenchmark/taosBenchmark.c b/src/kit/taosBenchmark/taosBenchmark.c new file mode 100644 index 000000000000..296eac79b167 --- /dev/null +++ b/src/kit/taosBenchmark/taosBenchmark.c @@ -0,0 +1,828 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
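main above registers interruptHandler with sigaction but fills only sa_handler of a stack-allocated struct sigaction, leaving the remaining fields uninitialized. A standalone sketch of the commonly recommended pattern, with a hypothetical handler that only sets a flag:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigint = 0;

static void on_sigint(int signum) {
  (void)signum;
  got_sigint = 1;  /* only async-signal-safe work belongs in a handler */
}

int main(void) {
  struct sigaction act;
  memset(&act, 0, sizeof(act));  /* avoid undefined sa_flags / sa_mask */
  act.sa_handler = on_sigint;
  sigemptyset(&act.sa_mask);
  sigaction(SIGINT, &act, NULL);

  printf("press Ctrl+C within 5 seconds...\n");
  sleep(5);
  puts(got_sigint ? "SIGINT caught" : "no SIGINT");
  return 0;
}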
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +/* #define _GNU_SOURCE */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taos.h" + +#define BUFFER_SIZE 65536 +#define MAX_DB_NAME_SIZE 64 +#define MAX_TB_NAME_SIZE 64 +#define MAX_DATA_SIZE 1024 +#define MAX_NUM_DATATYPE 8 +#define OPT_ABORT 1 /* –abort */ + +/* The options we understand. */ +static struct argp_option options[] = { + {0, 'h', "host", 0, "The host to connect to TDEngine. Default is localhost.", 0}, + {0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1}, + {0, 'u', "user", 0, "The TDEngine user name to use when connecting to the server. Default is 'root'.", 2}, + {0, 'a', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3}, + {0, 'd', "database", 0, "Destination database. Default is 'test'.", 3}, + {0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3}, + {0, 'M', 0, 0, "Use metric flag.", 13}, + {0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14}, + {0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6}, + {0, 'b', "type_of_cols", 0, "The data_type of columns: 'INT', 'TINYINT', 'SMALLINT', 'BIGINT', 'FLOAT', 'DOUBLE', 'BINARY'. Default is 'INT'.", 7}, + {0, 'w', "length_of_binary", 0, "The length of data_type 'BINARY'. Only applicable when type of cols is 'BINARY'. Default is 8", 8}, + {0, 'l', "num_of_cols_per_record", 0, "The number of columns per record. Default is 1.", 8}, + {0, 'c', "num_of_conns", 0, "The number of connections. Default is 1.", 9}, + {0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1.", 10}, + {0, 't', "num_of_tables", 0, "The number of tables. Default is 1.", 11}, + {0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 50000.", 12}, + {0, 'f', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14}, + {0, 'x', 0, 0, "Insert only flag.", 13}, + {0}}; + +/* Used by main to communicate with parse_opt. */ +struct arguments { + char *host; + int port; + char *user; + char *password; + char *database; + char *tb_prefix; + bool use_metric; + bool insert_only; + char *output_file; + int mode; + char *datatype[MAX_NUM_DATATYPE]; + int len_of_binary; + int num_of_CPR; + int num_of_connections; + int num_of_RPR; + int num_of_tables; + int num_of_DPT; + int abort; + char **arg_list; +}; + +/* Parse a single option. */ +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + /* Get the input argument from argp_parse, which we + know is a pointer to our arguments structure. 
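The -b option handled in parse_opt below accepts a comma-separated list of column types and splits it in place with strsep on a strdup'ed copy. A standalone illustration of how glibc/BSD strsep walks such a list (the copy is required because strsep writes NUL bytes into its argument, which the caller must later free):

#define _DEFAULT_SOURCE  /* strdup, strsep */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  char *dup = strdup("INT,FLOAT,BINARY");  /* strsep modifies the buffer, so work on a copy */
  char *running = dup;
  char *token;
  while ((token = strsep(&running, ",")) != NULL) {
    if (*token != '\0') printf("type: %s\n", token);  /* empty tokens appear for ",," */
  }
  free(dup);
  return 0;
}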
*/ + struct arguments *arguments = state->input; + wordexp_t full_path; + char **sptr; + switch (key) { + case 'h': + arguments->host = arg; + break; + case 'p': + arguments->port = atoi(arg); + break; + case 'u': + arguments->user = arg; + break; + case 'a': + arguments->password = arg; + break; + case 'o': + arguments->output_file = arg; + break; + case 'q': + arguments->mode = atoi(arg); + break; + case 'c': + arguments->num_of_connections = atoi(arg); + break; + case 'r': + arguments->num_of_RPR = atoi(arg); + break; + case 't': + arguments->num_of_tables = atoi(arg); + break; + case 'n': + arguments->num_of_DPT = atoi(arg); + break; + case 'd': + arguments->database = arg; + break; + case 'l': + arguments->num_of_CPR = atoi(arg); + break; + case 'b': + sptr = arguments->datatype; + if (strstr(arg, ",") == NULL) { + if (strcasecmp(arg, "INT") != 0 && strcasecmp(arg, "FLOAT") != 0 && + strcasecmp(arg, "TINYINT") != 0 && strcasecmp(arg, "BOOL") != 0 && + strcasecmp(arg, "SMALLINT") != 0 && + strcasecmp(arg, "BIGINT") != 0 && strcasecmp(arg, "DOUBLE") != 0 && + strcasecmp(arg, "BINARY")) { + argp_error(state, "Invalid data_type!"); + } + sptr[0] = arg; + } else { + int index = 0; + char *dupstr = strdup(arg); + char *running = dupstr; + char *token = strsep(&running, ","); + while (token != NULL) { + if (strcasecmp(token, "INT") != 0 && + strcasecmp(token, "FLOAT") != 0 && + strcasecmp(token, "TINYINT") != 0 && + strcasecmp(token, "BOOL") != 0 && + strcasecmp(token, "SMALLINT") != 0 && + strcasecmp(token, "BIGINT") != 0 && + strcasecmp(token, "DOUBLE") != 0 && strcasecmp(token, "BINARY")) { + argp_error(state, "Invalid data_type!"); + } + sptr[index++] = token; + token = strsep(&running, ", "); + } + } + break; + case 'w': + arguments->len_of_binary = atoi(arg); + break; + case 'm': + arguments->tb_prefix = arg; + break; + case 'M': + arguments->use_metric = true; + break; + case 'x': + arguments->insert_only = true; + break; + case 'f': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + strcpy(configDir, full_path.we_wordv[0]); + wordfree(&full_path); + break; + case OPT_ABORT: + arguments->abort = 1; + break; + case ARGP_KEY_ARG: + /*arguments->arg_list = &state->argv[state->next-1]; + state->next = state->argc;*/ + argp_usage(state); + break; + + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +/* ******************************* Structure + * definition******************************* */ +enum MODE { + SYNC, ASYNC +}; +typedef struct { + TAOS *taos; + int threadID; + char db_name[MAX_DB_NAME_SIZE]; + char fp[4096]; + char **datatype; + int len_of_binary; + char tb_prefix[MAX_TB_NAME_SIZE]; + int start_table_id; + int end_table_id; + int ncols_per_record; + int nrecords_per_table; + int nrecords_per_request; + long start_time; + bool do_aggreFunc; + + sem_t mutex_sem; + int notFinished; + sem_t lock_sem; +} info; + +typedef struct { + TAOS *taos; + + char tb_name[MAX_TB_NAME_SIZE]; + long timestamp; + int target; + int counter; + int nrecords_per_request; + int ncols_per_record; + char **data_type; + int len_of_binary; + + sem_t *mutex_sem; + int *notFinished; + sem_t *lock_sem; +} sTable; + +/* ******************************* Global + * variables******************************* */ +char *aggreFunc[] = {"*", "count(*)", "avg(f1)", "sum(f1)", "max(f1)", "min(f1)", "first(f1)", "last(f1)"}; + +/* ******************************* Global + * functions******************************* */ +static struct argp argp = {options, 
parse_opt, 0, 0}; + +void queryDB(TAOS *taos, char *command); + +void *readTable(void *sarg); + +void *readMetric(void *sarg); + +void *syncWrite(void *sarg); + +void *asyncWrite(void *sarg); + +void generateData(char *res, char **data_type, int num_of_cols, long timestamp, int len_of_binary); + +void rand_string(char *str, int size); + +double getCurrentTime(); + +void callBack(void *param, TAOS_RES *res, int code); + +int main(int argc, char *argv[]) { + struct arguments arguments = {NULL, + 0, + "root", + "taosdata", + "test", + "t", + false, + false, + "./output.txt", + 0, + "int", + "", + "", + "", + "", + "", + "", + "", + 8, + 1, + 1, + 1, + 1, + 50000}; + + /* Parse our arguments; every option seen by parse_opt will be + reflected in arguments. */ + argp_parse(&argp, argc, argv, 0, 0, &arguments); + + if (arguments.abort) error(10, 0, "ABORTED"); + + enum MODE query_mode = arguments.mode; + char *ip_addr = arguments.host; + int port = arguments.port; + char *user = arguments.user; + char *pass = arguments.password; + char *db_name = arguments.database; + char *tb_prefix = arguments.tb_prefix; + int len_of_binary = arguments.len_of_binary; + int ncols_per_record = arguments.num_of_CPR; + int ntables = arguments.num_of_tables; + int nconnections = arguments.num_of_connections; + int nrecords_per_table = arguments.num_of_DPT; + int nrecords_per_request = arguments.num_of_RPR; + bool use_metric = arguments.use_metric; + bool insert_only = arguments.insert_only; + char **data_type = arguments.datatype; + int count_data_type = 0; + char dataString[512]; + bool do_aggreFunc = true; + if (strcasecmp(data_type[0], "BINARY") == 0 || strcasecmp(data_type[0], "BOOL") == 0) { + do_aggreFunc = false; + } + for (; count_data_type <= MAX_NUM_DATATYPE; count_data_type++) { + if (strcasecmp(data_type[count_data_type], "") == 0) { + break; + } + strcat(dataString, data_type[count_data_type]); + strcat(dataString, " "); + } + + FILE *fp = fopen(arguments.output_file, "a"); + time_t tTime = time(NULL); + struct tm tm = *localtime(&tTime); + + fprintf(fp, "###################################################################\n"); + fprintf(fp, "# Server IP: %s:%d\n", ip_addr == NULL ? "localhost" : ip_addr, port); + fprintf(fp, "# User: %s\n", user); + fprintf(fp, "# Password: %s\n", pass); + fprintf(fp, "# Use metric: %s\n", use_metric ? "true" : "false"); + fprintf(fp, "# Datatype of Columns: %s\n", dataString); + fprintf(fp, "# Binary Length(If applicable): %d\n", + (strcasestr(dataString, "BINARY") != NULL) ? 
len_of_binary : -1); + fprintf(fp, "# Number of Columns per record: %d\n", ncols_per_record); + fprintf(fp, "# Number of Connections: %d\n", nconnections); + fprintf(fp, "# Number of Tables: %d\n", ntables); + fprintf(fp, "# Number of Data per Table: %d\n", nrecords_per_table); + fprintf(fp, "# Records/Request: %d\n", nrecords_per_request); + fprintf(fp, "# Database name: %s\n", db_name); + fprintf(fp, "# Table prefix: %s\n", tb_prefix); + fprintf(fp, "# Test time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + fprintf(fp, "###################################################################\n\n"); + fprintf(fp, "| WRecords | Records/Second | Requests/Second | WLatency(ms) |\n"); + + taos_init(); + TAOS *taos = taos_connect(ip_addr, user, pass, NULL, port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(taos)); + taos_close(taos); + return 1; + } + char command[BUFFER_SIZE] = "\0"; + + sprintf(command, "drop database %s;", db_name); + taos_query(taos, command); + sleep(3); + + sprintf(command, "create database %s;", db_name); + taos_query(taos, command); + + char cols[512] = "\0"; + int colIndex = 0; + + for (; colIndex < ncols_per_record - 1; colIndex++) { + if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) { + sprintf(command, ",f%d %s", colIndex + 1, data_type[colIndex % count_data_type]); + strcat(cols, command); + } else { + sprintf(command, ",f%d %s(%d)", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); + strcat(cols, command); + } + } + + if (strcasecmp(data_type[colIndex % count_data_type], "BINARY") != 0) { + sprintf(command, ",f%d %s)", colIndex + 1, data_type[colIndex % count_data_type]); + } else { + sprintf(command, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary); + } + + strcat(cols, command); + + if (!use_metric) { + /* Create all the tables; */ + printf("Creating %d table(s)......\n", ntables); + for (int i = 0; i < ntables; i++) { + sprintf(command, "create table %s.%s%d (ts timestamp%s;", db_name, tb_prefix, i, cols); + queryDB(taos, command); + } + + printf("Table(s) created!\n"); + taos_close(taos); + + } else { + /* Create metric table */ + printf("Creating metric table...\n"); + sprintf(command, "create table %s.m1 (ts timestamp%s tags (index int)", db_name, cols); + queryDB(taos, command); + printf("Metric created!\n"); + + /* Create all the tables; */ + printf("Creating %d table(s)......\n", ntables); + for (int i = 0; i < ntables; i++) { + int j; + if (i % 10 == 0) { + j = 10; + } else { + j = i % 10; + } + sprintf(command, "create table %s.%s%d using %s.m1 tags(%d);", db_name, tb_prefix, i, db_name, j); + queryDB(taos, command); + } + + printf("Table(s) created!\n"); + taos_close(taos); + } + /* Wait for table to create */ + sleep(5); + + /* Insert data */ + double ts = getCurrentTime(); + printf("Inserting data......\n"); + pthread_t *pids = malloc(nconnections * sizeof(pthread_t)); + info *infos = malloc(nconnections * sizeof(info)); + + int a = ntables / nconnections; + if (a < 1) { + nconnections = ntables; + a = 1; + } + int b = ntables % nconnections; + int last = 0; + for (int i = 0; i < nconnections; i++) { + info *t_info = infos + i; + t_info->threadID = i; + strcpy(t_info->db_name, db_name); + strcpy(t_info->tb_prefix, tb_prefix); + t_info->datatype = data_type; + t_info->ncols_per_record = ncols_per_record; + t_info->nrecords_per_table = nrecords_per_table; + 
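Immediately after this, each writer thread is assigned a contiguous, inclusive range of table ids: ntables is divided by the number of connections and the remainder is spread one extra table over the first threads (see the start_table_id / end_table_id assignments that follow). A standalone sketch of that split, with hypothetical example sizes:

#include <stdio.h>

int main(void) {
  int ntables = 10, nthreads = 4;  /* example sizes */
  int a = ntables / nthreads;      /* base tables per thread */
  int b = ntables % nthreads;      /* first b threads take one extra */
  int last = 0;
  for (int i = 0; i < nthreads; i++) {
    int start = last;
    int end = i < b ? last + a : last + a - 1;  /* inclusive range, mirrors start/end_table_id */
    last = end + 1;
    printf("thread %d: tables %d..%d\n", i, start, end);
  }
  return 0;
}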
t_info->start_time = 1500000000000; + t_info->taos = taos_connect(ip_addr, user, pass, db_name, port); + t_info->len_of_binary = len_of_binary; + t_info->nrecords_per_request = nrecords_per_request; + t_info->start_table_id = last; + t_info->end_table_id = i < b ? last + a : last + a - 1; + last = t_info->end_table_id + 1; + + sem_init(&(t_info->mutex_sem), 0, 1); + t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1; + sem_init(&(t_info->lock_sem), 0, 0); + + if (query_mode == SYNC) { + pthread_create(pids + i, NULL, syncWrite, t_info); + } else { + pthread_create(pids + i, NULL, asyncWrite, t_info); + } + } + for (int i = 0; i < nconnections; i++) { + pthread_join(pids[i], NULL); + } + + double t = getCurrentTime() - ts; + if (query_mode == SYNC) { + printf("SYNC Insert with %d connections:\n", nconnections); + } else { + printf("ASYNC Insert with %d connections:\n", nconnections); + } + + fprintf(fp, "|%10.d | %10.2f | %10.2f | %10.4f |\n\n", + ntables * nrecords_per_table, ntables * nrecords_per_table / t, + (ntables * nrecords_per_table) / (t * nrecords_per_request), + t * 1000); + + printf("Spent %.4f seconds to insert %d records with %d record(s) per request: %.2f records/second\n", + t, ntables * nrecords_per_table, nrecords_per_request, + ntables * nrecords_per_table / t); + + for (int i = 0; i < nconnections; i++) { + info *t_info = infos + i; + taos_close(t_info->taos); + sem_destroy(&(t_info->mutex_sem)); + sem_destroy(&(t_info->lock_sem)); + } + + free(pids); + free(infos); + fclose(fp); + + if (!insert_only) { + // query data + pthread_t read_id; + info *rInfo = malloc(sizeof(info)); + rInfo->start_time = 1500000000000; + rInfo->start_table_id = 0; + rInfo->end_table_id = ntables - 1; + rInfo->do_aggreFunc = do_aggreFunc; + rInfo->nrecords_per_table = nrecords_per_table; + rInfo->taos = taos_connect(ip_addr, user, pass, db_name, port); + strcpy(rInfo->tb_prefix, tb_prefix); + strcpy(rInfo->fp, arguments.output_file); + + if (!use_metric) { + pthread_create(&read_id, NULL, readTable, rInfo); + } else { + pthread_create(&read_id, NULL, readMetric, rInfo); + } + pthread_join(read_id, NULL); + taos_close(rInfo->taos); + } + + return 0; +} + +void *readTable(void *sarg) { + info *rinfo = (info *)sarg; + TAOS *taos = rinfo->taos; + char command[BUFFER_SIZE] = "\0"; + long sTime = rinfo->start_time; + char *tb_prefix = rinfo->tb_prefix; + FILE *fp = fopen(rinfo->fp, "a"); + int num_of_DPT = rinfo->nrecords_per_table; + int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; + int totalData = num_of_DPT * num_of_tables; + bool do_aggreFunc = rinfo->do_aggreFunc; + + int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; + if (!do_aggreFunc) { + printf("\nThe first field is either Binary or Bool. 
Aggregation functions are not supported.\n"); + } + printf("%d records:\n", totalData); + fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); + + for (int j = 0; j < n; j++) { + double totalT = 0; + int count = 0; + for (int i = 0; i < num_of_tables; i++) { + sprintf(command, "select %s from %s%d where ts>= %ld", aggreFunc[j], tb_prefix, i, sTime); + + double t = getCurrentTime(); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "Failed to query\n"); + taos_close(taos); + exit(EXIT_FAILURE); + } + + TAOS_RES *result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "Failed to retreive results:%s\n", taos_errstr(taos)); + taos_close(taos); + exit(1); + } + + while (taos_fetch_row(result) != NULL) { + count++; + } + + t = getCurrentTime() - t; + totalT += t; + + taos_free_result(result); + } + + fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n", + aggreFunc[j] == "*" ? " * " : aggreFunc[j], totalData, + (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); + printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT); + } + fprintf(fp, "\n"); + + fclose(fp); + return NULL; +} + +void *readMetric(void *sarg) { + info *rinfo = (info *)sarg; + TAOS *taos = rinfo->taos; + char command[BUFFER_SIZE] = "\0"; + FILE *fp = fopen(rinfo->fp, "a"); + int num_of_DPT = rinfo->nrecords_per_table; + int num_of_tables = rinfo->end_table_id - rinfo->start_table_id + 1; + int totalData = num_of_DPT * num_of_tables; + bool do_aggreFunc = rinfo->do_aggreFunc; + + int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; + if (!do_aggreFunc) { + printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); + } + printf("%d records:\n", totalData); + fprintf(fp, "Querying On %d records:\n", totalData); + + for (int j = 0; j < n; j++) { + char condition[BUFFER_SIZE] = "\0"; + char tempS[BUFFER_SIZE] = "\0"; + + int m = 10 < num_of_tables ? 
10 : num_of_tables; + + for (int i = 1; i <= m; i++) { + if (i == 1) { + sprintf(tempS, "index = %d", i); + } else { + sprintf(tempS, " or index = %d ", i); + } + strcat(condition, tempS); + + sprintf(command, "select %s from m1 where %s", aggreFunc[j], condition); + + printf("Where condition: %s\n", condition); + fprintf(fp, "%s\n", command); + + double t = getCurrentTime(); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "Failed to query\n"); + taos_close(taos); + exit(EXIT_FAILURE); + } + + TAOS_RES *result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "Failed to retreive results:%s\n", taos_errstr(taos)); + taos_close(taos); + exit(1); + } + int count = 0; + while (taos_fetch_row(result) != NULL) { + count++; + } + t = getCurrentTime() - t; + + fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000); + printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t); + + taos_free_result(result); + } + fprintf(fp, "\n"); + } + + fclose(fp); + return NULL; +} + +void queryDB(TAOS *taos, char *command) { + if (taos_query(taos, command) != 0) { + fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(taos)); + taos_close(taos); + exit(EXIT_FAILURE); + } +} + +// sync insertion +void *syncWrite(void *sarg) { + info *winfo = (info *)sarg; + char buffer[BUFFER_SIZE] = "\0"; + char data[MAX_DATA_SIZE]; + char **data_type = winfo->datatype; + int len_of_binary = winfo->len_of_binary; + int ncols_per_record = winfo->ncols_per_record; + srand(time(NULL)); + long time_counter = winfo->start_time; + for (int i = 0; i < winfo->nrecords_per_table;) { + for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { + int inserted = i; + long tmp_time = time_counter; + + char *pstr = buffer; + pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, tID); + int k; + for (k = 0; k < winfo->nrecords_per_request;) { + generateData(data, data_type, ncols_per_record, tmp_time++, len_of_binary); + pstr += sprintf(pstr, " %s", data); + inserted++; + k++; + + if (inserted >= winfo->nrecords_per_table) break; + } + + /* puts(buffer); */ + queryDB(winfo->taos, buffer); + + if (tID == winfo->end_table_id) { + i = inserted; + time_counter = tmp_time; + } + } + } + return NULL; +} + +void *asyncWrite(void *sarg) { + info *winfo = (info *)sarg; + + sTable *tb_infos = (sTable *)malloc(sizeof(sTable) * (winfo->end_table_id - winfo->start_table_id + 1)); + + for (int tID = winfo->start_table_id; tID <= winfo->end_table_id; tID++) { + sTable *tb_info = tb_infos + tID - winfo->start_table_id; + tb_info->data_type = winfo->datatype; + tb_info->ncols_per_record = winfo->ncols_per_record; + tb_info->taos = winfo->taos; + sprintf(tb_info->tb_name, "%s.%s%d", winfo->db_name, winfo->tb_prefix, tID); + tb_info->timestamp = winfo->start_time; + tb_info->counter = 0; + tb_info->target = winfo->nrecords_per_table; + tb_info->len_of_binary = winfo->len_of_binary; + tb_info->nrecords_per_request = winfo->nrecords_per_request; + tb_info->mutex_sem = &(winfo->mutex_sem); + tb_info->notFinished = &(winfo->notFinished); + tb_info->lock_sem = &(winfo->lock_sem); + + /* char buff[BUFFER_SIZE] = "\0"; */ + /* sprintf(buff, "insert into %s values (0, 0)", tb_info->tb_name); */ + /* queryDB(tb_info->taos,buff); */ + + taos_query_a(winfo->taos, "show databases", callBack, tb_info); + } + + sem_wait(&(winfo->lock_sem)); + free(tb_infos); + + return NULL; +} + +void callBack(void *param, TAOS_RES *res, int code) 
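+/* Completion callback for taos_query_a(): each invocation either marks this table as
+   finished (decrementing notFinished under mutex_sem and posting lock_sem once all of
+   this thread's tables are done) or generates the next batch of up to
+   nrecords_per_request rows and re-submits itself, so every table keeps one
+   asynchronous insert in flight. */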
{ + sTable *tb_info = (sTable *)param; + char **datatype = tb_info->data_type; + int ncols_per_record = tb_info->ncols_per_record; + int len_of_binary = tb_info->len_of_binary; + long tmp_time = tb_info->timestamp; + + if (code < 0) { + fprintf(stderr, "failed to insert data %d:reason; %s\n", code, taos_errstr(tb_info->taos)); + exit(EXIT_FAILURE); + } + + // If finished; + if (tb_info->counter >= tb_info->target) { + sem_wait(tb_info->mutex_sem); + (*(tb_info->notFinished))--; + if (*(tb_info->notFinished) == 0) sem_post(tb_info->lock_sem); + sem_post(tb_info->mutex_sem); + return; + } + + char buffer[BUFFER_SIZE] = "\0"; + char data[MAX_DATA_SIZE]; + char *pstr = buffer; + pstr += sprintf(pstr, "insert into %s values", tb_info->tb_name); + + for (int i = 0; i < tb_info->nrecords_per_request; i++) { + generateData(data, datatype, ncols_per_record, tmp_time++, len_of_binary); + pstr += sprintf(pstr, "%s", data); + tb_info->counter++; + + if (tb_info->counter >= tb_info->target) { + break; + } + } + + taos_query_a(tb_info->taos, buffer, callBack, tb_info); + + taos_free_result(res); +} + +double getCurrentTime() { + struct timeval tv; + if (gettimeofday(&tv, NULL) != 0) { + perror("Failed to get current time in ms"); + exit(EXIT_FAILURE); + } + + return tv.tv_sec + tv.tv_usec / 1E6; +} + +void generateData(char *res, char **data_type, int num_of_cols, long timestamp, int len_of_binary) { + memset(res, 0, MAX_DATA_SIZE); + char *pstr = res; + pstr += sprintf(pstr, "(%ld", timestamp); + int c = 0; + + for (; c < MAX_NUM_DATATYPE; c++) { + if (strcasecmp(data_type[c], "") == 0) { + break; + } + } + + for (int i = 0; i < num_of_cols; i++) { + if (strcasecmp(data_type[i % c], "tinyint") == 0) { + pstr += sprintf(pstr, ", %d", (int)(rand() % 128)); + } else if (strcasecmp(data_type[i % c], "smallint") == 0) { + pstr += sprintf(pstr, ", %d", (int)(rand() % 32767)); + } else if (strcasecmp(data_type[i % c], "int") == 0) { + pstr += sprintf(pstr, ", %d", (int)(rand() % 2147483648)); + } else if (strcasecmp(data_type[i % c], "bigint") == 0) { + pstr += sprintf(pstr, ", %ld", rand() % 2147483648); + } else if (strcasecmp(data_type[i % c], "float") == 0) { + pstr += sprintf(pstr, ", %10.4f", (float)(rand() / 1000)); + } else if (strcasecmp(data_type[i % c], "double") == 0) { + double t = (double)(rand() / 1000000); + pstr += sprintf(pstr, ", %20.8f", t); + } else if (strcasecmp(data_type[i % c], "bool") == 0) { + bool b = rand() & 1; + pstr += sprintf(pstr, ", %s", b ? "true" : "false"); + } else if (strcasecmp(data_type[i % c], "binary") == 0) { + char s[len_of_binary]; + rand_string(s, len_of_binary); + pstr += sprintf(pstr, ", %s", s); + } + } + + pstr += sprintf(pstr, ")"); +} + +void rand_string(char *str, int size) { + memset(str, 0, size); + const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890"; + char *sptr = str; + if (size) { + --size; + for (size_t n = 0; n < size; n++) { + int key = rand() % (int)(sizeof charset - 1); + sptr += sprintf(sptr, "%c", charset[key]); + } + } +} diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt new file mode 100644 index 000000000000..1a65fd005181 --- /dev/null +++ b/src/kit/taosdump/CMakeLists.txt @@ -0,0 +1,9 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(. 
SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ../../client/inc) + +ADD_EXECUTABLE(taosdump ${SRC}) +TARGET_LINK_LIBRARIES(taosdump taos_static trpc tutil pthread) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c new file mode 100644 index 000000000000..aa0f42925812 --- /dev/null +++ b/src/kit/taosdump/taosdump.c @@ -0,0 +1,1087 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taos.h" +#include "taosmsg.h" +#include "tglobalcfg.h" +#include "tsclient.h" +#include "tsdb.h" +#include "tutil.h" + +#define COMMAND_SIZE 65536 +#define DEFAULT_DUMP_FILE "taosdump.sql" + +typedef struct { + short bytes; + int8_t type; +} SOColInfo; + +// -------------------------- SHOW DATABASE INTERFACE----------------------- +enum _show_db_index { + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_TABLES_INDEX, + TSDB_SHOW_DB_ROWS_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_ABLOCKS_INDEX, + TSDB_SHOW_DB_TBLOCKS_INDEX, + TSDB_SHOW_DB_CTIME_INDEX, + TSDB_SHOW_DB_CLOG_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_MAX_SHOW_DB +}; + +// -----------------------------------------SHOW TABLES CONFIGURE +// ------------------------------------- +enum _show_tables_index { + TSDB_SHOW_TABLES_NAME_INDEX, + TSDB_SHOW_TABLES_CREATED_TIME_INDEX, + TSDB_SHOW_TABLES_COLUMNS_INDEX, + TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_MAX_SHOW_TABLES +}; + +// ---------------------------------- DESCRIBE METRIC CONFIGURE +// ------------------------------ +enum _describe_table_index { + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC +}; + +typedef struct { + char field[TSDB_COL_NAME_LEN + 1]; + char type[16]; + int length; + char note[8]; +} SColDes; + +typedef struct { + char name[TSDB_COL_NAME_LEN + 1]; + SColDes cols[]; +} STableDef; + +extern char version[]; + +typedef struct { + char name[TSDB_DB_NAME_LEN + 1]; + int32_t replica; + int32_t days; + int32_t keep; + int32_t tables; + int32_t rows; + int32_t cache; + int32_t ablocks; + int32_t tblocks; + int32_t ctime; + int32_t clog; + int32_t comp; +} SDbInfo; + +typedef struct { + char name[TSDB_METER_NAME_LEN + 1]; + char metric[TSDB_METER_NAME_LEN + 1]; +} STableRecord; + +typedef struct { + bool isMetric; + STableRecord tableRecord; +} STableRecordInfo; + +SDbInfo **dbInfos = NULL; + +const char *argp_program_version = version; +const char *argp_program_bug_address = ""; + +/* Program documentation. 
*/ +static char doc[] = ""; +/* "Argp example #4 -- a program with somewhat more complicated\ */ +/* options\ */ +/* \vThis part of the documentation comes *after* the options;\ */ +/* note that the text is automatically filled, but it's possible\ */ +/* to force a line-break, e.g.\n<-- here."; */ + +/* A description of the arguments we accept. */ +static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-databases\n-i input_file"; + +/* Keys for options without short-options. */ +#define OPT_ABORT 1 /* –abort */ + +/* The options we understand. */ +static struct argp_option options[] = { + // connection option + {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, + {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"port", 'P', "PORT", 0, "Port to connect", 0}, + // input/output file + {"output", 'o', "OUTPUT", 0, "Output file name.", 1}, + {"input", 'i', "INPUT", 0, "Input file name.", 1}, + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, + // dump unit options + {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, + {"databases", 'B', 0, 0, "Dump assigned databases", 2}, + // dump format options + {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, + {"with-property", 'M', 0, 0, "Dump schema with properties.", 3}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, + {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, + {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, + {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, + {0}}; + +/* Used by main to communicate with parse_opt. */ +struct arguments { + // connection option + char *host; + char *user; + char *password; + int port; + // output file + char output[TSDB_FILENAME_LEN + 1]; + char input[TSDB_FILENAME_LEN + 1]; + // dump unit option + bool all_databases; + bool databases; + // dump format option + bool schemaonly; + bool with_property; + int64_t start_time; + int64_t end_time; + int data_batch; + bool allow_sys; + // other options + int abort; + char **arg_list; + int arg_list_len; + bool isDumpIn; +}; + +/* Parse a single option. */ +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + /* Get the input argument from argp_parse, which we + know is a pointer to our arguments structure. 
*/ + struct arguments *arguments = state->input; + wordexp_t full_path; + + switch (key) { + // connection option + case 'a': + arguments->allow_sys = true; + break; + case 'h': + arguments->host = arg; + break; + case 'u': + arguments->user = arg; + break; + case 'p': + arguments->password = arg; + break; + case 'P': + arguments->port = atoi(arg); + break; + // output file + case 'o': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + strcpy(arguments->output, full_path.we_wordv[0]); + wordfree(&full_path); + break; + case 'i': + arguments->isDumpIn = true; + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + strcpy(arguments->input, full_path.we_wordv[0]); + wordfree(&full_path); + break; + case 'c': + if (wordexp(arg, &full_path, 0) != 0) { + fprintf(stderr, "Invalid path %s\n", arg); + return -1; + } + strcpy(configDir, full_path.we_wordv[0]); + wordfree(&full_path); + break; + // dump unit option + case 'A': + arguments->all_databases = true; + break; + case 'B': + arguments->databases = true; + break; + // dump format option + case 's': + arguments->schemaonly = true; + break; + case 'M': + arguments->with_property = true; + break; + case 'S': + // parse time here. + arguments->start_time = atol(arg); + break; + case 'E': + arguments->end_time = atol(arg); + break; + case 'N': + arguments->data_batch = atoi(arg); + break; + case OPT_ABORT: + arguments->abort = 1; + break; + case ARGP_KEY_ARG: + arguments->arg_list = &state->argv[state->next - 1]; + arguments->arg_list_len = state->argc - state->next + 1; + state->next = state->argc; + break; + + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + +/* Our argp parser. */ +static struct argp argp = {options, parse_opt, args_doc, doc}; + +TAOS *taos = NULL; +TAOS_RES *result = NULL; +char *command = NULL; +char *buffer = NULL; + +int taosDumpOut(struct arguments *arguments); + +int taosDumpIn(struct arguments *arguments); + +void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); + +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp); + +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, struct arguments *arguments, FILE *fp); + +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, struct arguments *arguments, + FILE *fp); + +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp); + +int32_t taosDumpMetric(char *metric, struct arguments *arguments, FILE *fp); + +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments); + +int taosCheckParam(struct arguments *arguments); + +void taosFreeDbInfos(); + +int main(int argc, char *argv[]) { + struct arguments arguments = { + // connection option + NULL, "root", "taosdata", 0, + // output file + DEFAULT_DUMP_FILE, DEFAULT_DUMP_FILE, + // dump unit option + false, false, + // dump format option + false, false, 0, INT64_MAX, 1, false, + // other options + 0, NULL, 0, false}; + + /* Parse our arguments; every option seen by parse_opt will be + reflected in arguments. 
*/ + argp_parse(&argp, argc, argv, 0, 0, &arguments); + + if (arguments.abort) error(10, 0, "ABORTED"); + + if (taosCheckParam(&arguments) < 0) { + exit(EXIT_FAILURE); + } + + if (arguments.isDumpIn) { + if (taosDumpIn(&arguments) < 0) return -1; + } else { + if (taosDumpOut(&arguments) < 0) return -1; + } + + return 0; +} + +void taosFreeDbInfos() { + if (dbInfos == NULL) return; + for (int i = 0; i < tsMaxDbs; i++) tfree(dbInfos[i]); + tfree(dbInfos); +} + +int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo) { + TAOS_ROW row = NULL; + bool isSet = false; + + memset(pTableRecordInfo, 0, sizeof(STableRecordInfo)); + + sprintf(command, "show tables like %s", table); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "failed to run command %s\n", command); + return -1; + } + + result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "failed to use result\n"); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = false; + strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + break; + } + + taos_free_result(result); + result = NULL; + + if (isSet) return 0; + + sprintf(command, "show stables like %s", table); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "failed to run command %s\n", command); + return -1; + } + + result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "failed to use result\n"); + return -1; + } + + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = true; + strcpy(pTableRecordInfo->tableRecord.metric, table); + break; + } + + taos_free_result(result); + result = NULL; + + if (isSet) return 0; + + fprintf(stderr, "invalid table/metric %s\n", table); + return -1; +} + +int taosDumpOut(struct arguments *arguments) { + TAOS_ROW row; + char *temp = NULL; + FILE *fp = NULL; + int count = 0; + STableRecordInfo tableRecordInfo; + + fp = fopen(arguments->output, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", arguments->output); + return -1; + } + + dbInfos = (SDbInfo **)calloc(tsMaxDbs, sizeof(SDbInfo *)); + if (dbInfos == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + temp = (char *)malloc(2 * COMMAND_SIZE); + if (temp == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + command = temp; + buffer = command + COMMAND_SIZE; + + /* Connect to server */ + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + fprintf(stderr, "failed to connect to TDEngine server\n"); + goto _exit_failure; + } + + /* --------------------------------- Main Code + * -------------------------------- */ + /* if (arguments->databases || arguments->all_databases) { // dump part of + * databases or all databases */ + /* */ + sprintf(command, "show databases"); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos)); + goto _exit_failure; + } + + result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "failed to use result\n"); + goto _exit_failure; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + 
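/* Walk the "show databases" result set: skip the sys database unless --allow-sys is
+     given, keep only the databases requested on the command line (or every database
+     when --all-databases is set), and record each database's properties in dbInfos. */ +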
while ((row = taos_fetch_row(result)) != NULL) { + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "sys", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && + (!arguments->allow_sys)) + continue; + + if (arguments->databases) { + for (int i = 0; arguments->arg_list[i]; i++) { + if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + } + continue; + } else if (!arguments->all_databases) { + if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + else + continue; + } + + _dump_db_point: + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (dbInfos[count] == NULL) { + fprintf(stderr, "failed to allocate memory\n"); + goto _exit_failure; + } + + strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX])); + dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX])); + dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]); + dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]); + dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]); + dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]); + dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX])); + dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]); + dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX])); + dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + + count++; + + if (arguments->databases) { + if (count > arguments->arg_list_len) break; + + } else if (!arguments->all_databases) { + if (count >= 1) break; + } + } + + taos_free_result(result); + + if (count == 0) { + fprintf(stderr, "No databases valid to dump\n"); + goto _exit_failure; + } + + if (arguments->databases || arguments->all_databases) { + for (int i = 0; i < count; i++) { + taosDumpDb(dbInfos[i], arguments, fp); + } + } else { + if (arguments->arg_list_len == 1) { + taosDumpDb(dbInfos[0], arguments, fp); + } else { + taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); + + sprintf(command, "use %s", dbInfos[0]->name); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "invalid database %s\n", dbInfos[0]->name); + goto _exit_failure; + } + + fprintf(fp, "USE %s;\n\n", dbInfos[0]->name); + + for (int i = 1; arguments->arg_list[i]; i++) { + if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo) < 0) { + fprintf(stderr, "invalide table %s\n", arguments->arg_list[i]); + continue; + } + + if (tableRecordInfo.isMetric) { // dump whole metric + taosDumpMetric(tableRecordInfo.tableRecord.metric, arguments, fp); + } else { // dump MTable and NTable + taosDumpTable(tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, arguments, fp); + } + } + } + } + + /* Close the handle and return */ + fclose(fp); + taos_close(taos); + taos_free_result(result); + free(temp); + taosFreeDbInfos(); + return 0; + + _exit_failure: + fclose(fp); + taos_close(taos); + taos_free_result(result); + free(temp); + taosFreeDbInfos(); + return -1; +} + +void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { + char *pstr = buffer; + + pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); + if (isDumpProperty) 
{ + pstr += sprintf(pstr, + " REPLICA %d DAYS %d KEEP %d TABLES %d ROWS %d CACHE %d ABLOCKS %d TBLOCKS %d CTIME %d CLOG %d COMP %d", + dbInfo->replica, dbInfo->days, dbInfo->keep, dbInfo->tables, dbInfo->rows, dbInfo->cache, + dbInfo->ablocks, dbInfo->tblocks, dbInfo->ctime, dbInfo->clog, dbInfo->comp); + } + + fprintf(fp, "%s\n\n", buffer); +} + +int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + + taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); + + sprintf(command, "use %s", dbInfo->name); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "invalid database %s\n", dbInfo->name); + return -1; + } + + fprintf(fp, "USE %s\n\n", dbInfo->name); + + sprintf(command, "show tables"); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "failed to run command %s\n", command); + return -1; + } + + result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "failed to use result\n"); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + fd = open(".table.tmp", O_RDWR | O_CREAT); + if (fd == -1) { + fprintf(stderr, "failed to open temp file\n"); + taos_free_result(result); + return -1; + } + + while ((row = taos_fetch_row(result)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); + strncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + + write(fd, &tableRecord, sizeof(STableRecord)); + } + + taos_free_result(result); + + lseek(fd, 0, SEEK_SET); + + while (read(fd, &tableRecord, sizeof(STableRecord)) > 0) { + taosDumpTable(tableRecord.name, tableRecord.metric, arguments, fp); + } + + tclose(fd); + remove(".table.tmp"); + + return 0; +} + +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, struct arguments *arguments, FILE *fp) { + char *pstr = NULL; + pstr = buffer; + int counter = 0; + int count_temp = 0; + + pstr += sprintf(buffer, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); + + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; + + if (counter == 0) { + pstr += sprintf(pstr, " (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } + + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } + } + + count_temp = counter; + + for (; counter < numOfCols; counter++) { + if (counter == count_temp) { + pstr += sprintf(pstr, ") TAGS (%s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", tableDes->cols[counter].field, tableDes->cols[counter].type); + } + + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } + } + + pstr += sprintf(pstr, ")"); + + fprintf(fp, "%s\n\n", buffer); +} + +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, struct arguments *arguments, + FILE *fp) { + char *pstr = NULL; + pstr = buffer; + int counter = 0; + int count_temp = 0; + + pstr += sprintf(buffer, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", 
tableDes->name, metric); + + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; + } + + assert(counter < numOfCols); + count_temp = counter; + + for (; counter < numOfCols; counter++) { + if (counter != count_temp) { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); + } + } else { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); + } + /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */ + } + + /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + * strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { */ + /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */ + /* } */ + } + + pstr += sprintf(pstr, ")"); + + fprintf(fp, "%s\n\n", buffer); +} + +int taosGetTableDes(char *table, STableDef *tableDes) { + TAOS_ROW row = NULL; + int count = 0; + + sprintf(command, "describe %s", table); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "failed to run command %s\n", command); + return -1; + } + + result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "failed to use result\n"); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + strcpy(tableDes->name, table); + + while ((row = taos_fetch_row(result)) != NULL) { + strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + + count++; + } + + taos_free_result(result); + result = NULL; + + return count; +} + +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp) { + int count = 0; + + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + + if (metric != NULL && metric[0] != '\0') { // dump metric definition + count = taosGetTableDes(metric, tableDes); + + if (count < 0) return -1; + + taosDumpCreateTableClause(tableDes, count, arguments, fp); + + memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + + count = taosGetTableDes(table, tableDes); + + if (count < 0) return -1; + + taosDumpCreateMTableClause(tableDes, metric, count, arguments, fp); + + } else { // dump table definition + count = taosGetTableDes(table, tableDes); + + if (count < 0) return -1; + + taosDumpCreateTableClause(tableDes, count, arguments, fp); + } + + return taosDumpTableData(fp, table, arguments); +} + +int32_t taosDumpMetric(char *metric, struct arguments *arguments, FILE *fp) { + TAOS_ROW row = NULL; + int fd = -1; + STableRecord tableRecord; + + strcpy(tableRecord.metric, metric); + + sprintf(command, "select tbname from %s", metric); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "failed to run command %s\n", command); + return -1; + } + + result = taos_use_result(taos); + if 
(result == NULL) { + fprintf(stderr, "failed to use result\n"); + return -1; + } + + fd = open(".table.tmp", O_RDWR | O_CREAT); + if (fd < 0) { + fprintf(stderr, "failed to open temp file"); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + strncpy(tableRecord.name, (char *)row[0], fields[0].bytes); + strcpy(tableRecord.metric, metric); + write(fd, &tableRecord, sizeof(STableRecord)); + } + + taos_free_result(result); + result = NULL; + + lseek(fd, 0, SEEK_SET); + + while (read(fd, &tableRecord, sizeof(STableRecord)) > 0) { + taosDumpTable(tableRecord.name, tableRecord.metric, arguments, fp); + } + + tclose(fd); + remove(".table.tmp"); + + return 0; +} + +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { + /* char temp[MAX_COMMAND_SIZE] = "\0"; */ + int count = 0; + char *pstr = NULL; + TAOS_ROW row = NULL; + int numFields = 0; + int lstr = 0; + + if (arguments->schemaonly) return 0; + + sprintf(command, "select * from %s where _c0 >= %ld and _c0 <= %ld order by _c0 asc", tbname, arguments->start_time, + arguments->end_time); + if (taos_query(taos, command) != 0) { + fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(taos)); + return -1; + } + + result = taos_use_result(taos); + if (result == NULL) { + fprintf(stderr, "failed to use result\n"); + return -1; + } + + numFields = taos_field_count(taos); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(result); + + count = 0; + while ((row = taos_fetch_row(result)) != NULL) { + pstr = buffer; + + if (count == 0) { + pstr += sprintf(pstr, "INSERT INTO %s VALUES (", tbname); + } else { + pstr += sprintf(pstr, "("); + } + + for (int col = 0; col < numFields; col++) { + if (col != 0) pstr += sprintf(pstr, ", "); + + if (row[col] == NULL) { + pstr += sprintf(pstr, "NULL"); + continue; + } + + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + pstr += sprintf(pstr, "%d", ((((int)(*((char *)row[col]))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + pstr += sprintf(pstr, "%d", (int)(*((char *)row[col]))); + break; + case TSDB_DATA_TYPE_SMALLINT: + pstr += sprintf(pstr, "%d", (int)(*((short *)row[col]))); + break; + case TSDB_DATA_TYPE_INT: + pstr += sprintf(pstr, "%d", *((int *)row[col])); + break; + case TSDB_DATA_TYPE_BIGINT: + pstr += sprintf(pstr, "%ld", *((int64_t *)row[col])); + break; + case TSDB_DATA_TYPE_FLOAT: + pstr += sprintf(pstr, "%f", *((float *)row[col])); + break; + case TSDB_DATA_TYPE_DOUBLE: + pstr += sprintf(pstr, "%f", *((double *)row[col])); + break; + case TSDB_DATA_TYPE_BINARY: + *(pstr++) = '\''; + for (lstr = 0; lstr < fields[col].bytes; lstr++) { + if (((char *)row[col])[lstr] == '\0') break; + *(pstr++) = ((char *)row[col])[lstr]; + } + *(pstr++) = '\''; + break; + case TSDB_DATA_TYPE_NCHAR: + pstr += sprintf(pstr, "\'%s\'", (char *)row[col]); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + pstr += sprintf(pstr, "%ld", *(int64_t *)row[col]); + break; + default: + break; + } + } + pstr += sprintf(pstr, ")"); + + count++; + fprintf(fp, "%s", buffer); + + if (count >= arguments->data_batch) { + fprintf(fp, "\n"); + count = 0; + } else { + fprintf(fp, "\\\n"); + } + } + + fprintf(fp, "\n"); + + taos_free_result(result); + result = NULL; + return 0; +} + +int taosCheckParam(struct arguments *arguments) { + if (arguments->all_databases && arguments->databases) { + fprintf(stderr, "conflict option --all-databases and --databases\n"); + return -1; + } + + if (arguments->start_time > arguments->end_time) { + fprintf(stderr, "start time is larger than end time\n"); + return -1; + } + if (arguments->arg_list_len == 0) { + if ((!arguments->all_databases) && (!arguments->isDumpIn)) { + fprintf(stderr, "taosdump requirs parameters\n"); + return -1; + } + } + + if (arguments->isDumpIn && (strcmp(arguments->output, DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file\n"); + return -1; + } + + return 0; +} + +bool isEmptyCommand(char *cmd) { + char *pchar = cmd; + + while (*pchar != '\0') { + if (*pchar != ' ') return false; + pchar++; + } + + return true; +} + +int taosDumpIn(struct arguments *arguments) { + assert(arguments->isDumpIn); + + int tsize = 0; + FILE *fp = NULL; + char *line = NULL; + bool isRun = true; + size_t line_size = 0; + char *pstr = NULL; + + fp = fopen(arguments->input, "r"); + if (fp == NULL) { + fprintf(stderr, "failed to open input file %s\n", arguments->input); + return -1; + } + + taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); + if (taos == NULL) { + fprintf(stderr, "failed to connect to TDEngine server\n"); + goto _dumpin_exit_failure; + } + + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { + fprintf(stderr, "failed to connect to allocate memory\n"); + goto _dumpin_exit_failure; + } + + pstr = command; + while (1) { + ssize_t size = getline(&line, &line_size, fp); + if (size <= 0) break; + if (size == 1) { + if (pstr != command) { + if (taos_query(taos, command) != 0) + fprintf(stderr, "failed to run command %s reason:%s \ncontinue...\n", command, taos_errstr(taos)); + + pstr = command; + pstr[0] = '\0'; + tsize = 0; + isRun = true; + } + + continue; + } + + /* if (line[0] == '-' && line[1] == '-') continue; */ + + line[size - 1] = 0; + + if (tsize + size - 1 > COMMAND_SIZE) { + fprintf(stderr, "command is too long\n"); + goto _dumpin_exit_failure; + } + + if (line[size - 2] == '\\') { + line[size - 2] = ' '; + isRun = false; + } else { + isRun = 
true; + } + + memcpy(pstr, line, size - 1); + pstr += (size - 1); + *pstr = '\0'; + + if (!isRun) continue; + + if (command != pstr && !isEmptyCommand(command)) { + if (taos_query(taos, command) != 0) + fprintf(stderr, "failed to run command %s reason: %s \ncontinue...\n", command, taos_errstr(taos)); + } + + pstr = command; + *pstr = '\0'; + tsize = 0; + } + + if (pstr != command) { + if (taos_query(taos, command) != 0) + fprintf(stderr, "failed to run command %s reason:%s \ncontinue...\n", command, taos_errstr(taos)); + } + + tfree(line); + tfree(command); + taos_close(taos); + fclose(fp); + return 0; + + _dumpin_exit_failure: + tfree(command); + taos_close(taos); + fclose(fp); + return -1; +} diff --git a/src/modules/http/CMakeLists.txt b/src/modules/http/CMakeLists.txt new file mode 100755 index 000000000000..b21bd66bfc23 --- /dev/null +++ b/src/modules/http/CMakeLists.txt @@ -0,0 +1,9 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc ../http/inc ../../../deps/inc ../../client/inc) + +ADD_LIBRARY(http ${SRC}) +TARGET_LINK_LIBRARIES(http taos_static trpc tutil z) diff --git a/src/modules/http/inc/cJSON.h b/src/modules/http/inc/cJSON.h new file mode 100644 index 000000000000..31c6d19e7856 --- /dev/null +++ b/src/modules/http/inc/cJSON.h @@ -0,0 +1,264 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +#ifndef cJSON__h +#define cJSON__h + +#ifdef __cplusplus +extern "C" +{ +#endif + +/* project version */ +#define CJSON_VERSION_MAJOR 1 +#define CJSON_VERSION_MINOR 5 +#define CJSON_VERSION_PATCH 9 + +#include +#include + +/* cJSON Types: */ +#define cJSON_Invalid (0) +#define cJSON_False (1 << 0) +#define cJSON_True (1 << 1) +#define cJSON_NULL (1 << 2) +#define cJSON_Number (1 << 3) +#define cJSON_String (1 << 4) +#define cJSON_Array (1 << 5) +#define cJSON_Object (1 << 6) +#define cJSON_Raw (1 << 7) /* raw json */ + +#define cJSON_IsReference 256 +#define cJSON_StringIsConst 512 + +/* The cJSON structure: */ +typedef struct cJSON +{ + /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *next; + struct cJSON *prev; + /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ + struct cJSON *child; + + /* The type of the item, as above. 
*/ + int type; + + /* The item's string, if type==cJSON_String and type == cJSON_Raw */ + char *valuestring; + /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ + int64_t valueint; + /* The item's number, if type==cJSON_Number */ + double valuedouble; + + /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ + char *string; +} cJSON; + +typedef struct cJSON_Hooks +{ + void *(*malloc_fn)(size_t sz); + void (*free_fn)(void *ptr); +} cJSON_Hooks; + +typedef int cJSON_bool; + +#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#define __WINDOWS__ +#endif +#ifdef __WINDOWS__ + +/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 2 define options: + +CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols +CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) +CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol + +For *nix builds that support visibility attribute, you can define similar behavior by + +setting default visibility to hidden by adding +-fvisibility=hidden (for gcc) +or +-xldscope=hidden (for sun cc) +to CFLAGS + +then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does + +*/ + +/* export symbols by default, this is necessary for copy pasting the C and header file */ +#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_EXPORT_SYMBOLS +#endif + +#if defined(CJSON_HIDE_SYMBOLS) +#define CJSON_PUBLIC(type) type __stdcall +#elif defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllexport) type __stdcall +#elif defined(CJSON_IMPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllimport) type __stdcall +#endif +#else /* !WIN32 */ +#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) +#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type +#else +#define CJSON_PUBLIC(type) type +#endif +#endif + +/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. + * This is to prevent stack overflows. */ +#ifndef CJSON_NESTING_LIMIT +#define CJSON_NESTING_LIMIT 1000 +#endif + +/* returns the version of cJSON as a string */ +CJSON_PUBLIC(const char*) cJSON_Version(void); + +/* Supply malloc, realloc and free functions to cJSON */ +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); + +/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ +/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); +/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ +/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). 
*/ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); + +/* Render a cJSON entity to text for transfer/storage. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); +/* Render a cJSON entity to text for transfer/storage without any formatting. */ +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); +/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */ +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); +/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ +/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); +/* Delete a cJSON entity and all subentities. */ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *c); + +/* Returns the number of items in an array (or object). */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); +/* Retrieve item number "item" from array "array". Returns NULL if unsuccessful. */ +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); +/* Get item "string" from object. Case insensitive. */ +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); +/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */ +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); + +/* These functions check the type of an item */ +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); + +/* These calls create a cJSON item of the appropriate type. */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); +/* raw json */ +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); + +/* These utilities create an Array of count items. 
*/ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count); + +/* Append item to the specified array/object. */ +CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); +/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. + * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before + * writing to `item->string` */ +CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); +/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ +CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); +CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); + +/* Remove/Detatch items from Arrays/Objects. */ +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); + +/* Update array items. */ +CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); +CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); +CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); +CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem); + +/* Duplicate a cJSON item */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); +/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will +need to be released. With recurse!=0, it will duplicate any children connected to the item. +The item->next and ->prev pointers are always zero on return from Duplicate. */ +/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. + * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); + + +CJSON_PUBLIC(void) cJSON_Minify(char *json); + +/* Macros for creating things quickly. 
*/ +#define cJSON_AddNullToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateNull()) +#define cJSON_AddTrueToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateTrue()) +#define cJSON_AddFalseToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateFalse()) +#define cJSON_AddBoolToObject(object,name,b) cJSON_AddItemToObject(object, name, cJSON_CreateBool(b)) +#define cJSON_AddNumberToObject(object,name,n) cJSON_AddItemToObject(object, name, cJSON_CreateNumber(n)) +#define cJSON_AddStringToObject(object,name,s) cJSON_AddItemToObject(object, name, cJSON_CreateString(s)) +#define cJSON_AddRawToObject(object,name,s) cJSON_AddItemToObject(object, name, cJSON_CreateRaw(s)) + +/* When assigning an integer value, it needs to be propagated to valuedouble too. */ +#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) +/* helper for the cJSON_SetNumberValue macro */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); +#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) + +/* Macro for iterating over an array or object */ +#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next) + +/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ +CJSON_PUBLIC(void *) cJSON_malloc(size_t size); +CJSON_PUBLIC(void) cJSON_free(void *object); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/modules/http/inc/gcHandle.h b/src/modules/http/inc/gcHandle.h new file mode 100644 index 000000000000..ed1e9302eb07 --- /dev/null +++ b/src/modules/http/inc/gcHandle.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_GC_HANDLE_H +#define TDENGINE_GC_HANDLE_H + +#include +#include +#include +#include + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" + +#define GC_ROOT_URL_POS 0 +#define GC_ACTION_URL_POS 1 +#define GC_USER_URL_POS 2 +#define GC_PASS_URL_POS 3 + +void gcInitHandle(HttpServer* pServer); +bool gcProcessRequest(struct HttpContext* pContext); + +#endif \ No newline at end of file diff --git a/src/modules/http/inc/gcJson.h b/src/modules/http/inc/gcJson.h new file mode 100644 index 000000000000..609bb9b95e62 --- /dev/null +++ b/src/modules/http/inc/gcJson.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_GC_JSON_H +#define TDENGINE_GC_JSON_H +#include "httpHandle.h" +#include "httpJson.h" +#include "taos.h" + +void gcInitQueryJson(HttpContext *pContext); +void gcCleanQueryJson(HttpContext *pContext); + +void gcStartQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result); +void gcStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd); +bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows); + +void gcSendHeartBeatResp(HttpContext *pContext, HttpSqlCmd *cmd); + +#endif \ No newline at end of file diff --git a/src/modules/http/inc/http.h b/src/modules/http/inc/http.h new file mode 100644 index 000000000000..ea209f082c82 --- /dev/null +++ b/src/modules/http/inc/http.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_HTTP_H +#define TDENGINE_HTTP_H + +#include "tglobalcfg.h" +#include "tlog.h" + +#define httpError(...) \ + if (httpDebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR HTP ", 255, __VA_ARGS__); \ + } +#define httpWarn(...) \ + if (httpDebugFlag & DEBUG_WARN) { \ + tprintf("WARN HTP ", httpDebugFlag, __VA_ARGS__); \ + } +#define httpTrace(...) \ + if (httpDebugFlag & DEBUG_TRACE) { \ + tprintf("HTP ", httpDebugFlag, __VA_ARGS__); \ + } +#define httpDump(...) \ + if (httpDebugFlag & DEBUG_TRACE) { \ + taosPrintLongString("HTP ", httpDebugFlag, __VA_ARGS__); \ + } +#define httpPrint(...) \ + { tprintf("HTP ", 255, __VA_ARGS__); } + +#define httpLError(...) taosLogError(__VA_ARGS__) httpError(__VA_ARGS__) +#define httpLWarn(...) taosLogWarn(__VA_ARGS__) httpWarn(__VA_ARGS__) +#define httpLPrint(...) taosLogPrint(__VA_ARGS__) httpPrint(__VA_ARGS__) + +#endif diff --git a/src/modules/http/inc/httpCode.h b/src/modules/http/inc/httpCode.h new file mode 100644 index 000000000000..0235040139b5 --- /dev/null +++ b/src/modules/http/inc/httpCode.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_HTTP_CODE_H +#define TDENGINE_HTTP_CODE_H + +//for fixed msg info +#define HTTP_SUCCESS 0 +#define HTTP_SERVER_OFFLINE 1 +#define HTTP_UNSUPPORT_URL 2 +#define HTTP_PARSE_HTTP_METHOD_ERROR 3 +#define HTTP_PARSE_HTTP_VERSION_ERROR 4 +#define HTTP_PARSE_HEAD_ERROR 5 +#define HTTP_REQUSET_TOO_BIG 6 +#define HTTP_PARSE_BODY_ERROR 7 +#define HTTP_PARSE_CHUNKED_BODY_ERROR 8 +#define HTTP_PARSE_URL_ERROR 9 +#define HTTP_INVALID_AUTH_TOKEN 10 +#define HTTP_PARSE_USR_ERROR 11 +#define HTTP_NO_SQL_INPUT 12 +#define HTTP_SESSION_FULL 13 +#define HTTP_NO_ENOUGH_MEMORY 14 +#define HTTP_GEN_TAOSD_TOKEN_ERR 15 +#define HTTP_INVALID_DB_TABLE 16 +#define HTTP_NO_EXEC_USEDB 17 +#define HTTP_PARSE_GC_REQ_ERROR 18 +#define HTTP_INVALID_MULTI_REQUEST 19 +#define HTTP_NO_MSG_INPUT 20 +#define HTTP_NO_ENOUGH_SESSIONS 21 + +//telegraf +#define HTTP_TG_DB_NOT_INPUT 22 +#define HTTP_TG_DB_TOO_LONG 23 +#define HTTP_TG_INVALID_JSON 24 +#define HTTP_TG_METRICS_NULL 25 +#define HTTP_TG_METRICS_SIZE 26 +#define HTTP_TG_METRIC_NULL 27 +#define HTTP_TG_METRIC_TYPE 28 +#define HTTP_TG_METRIC_NAME_NULL 29 +#define HTTP_TG_METRIC_NAME_LONG 30 +#define HTTP_TG_TIMESTAMP_NULL 31 +#define HTTP_TG_TIMESTAMP_TYPE 32 +#define HTTP_TG_TIMESTAMP_VAL_NULL 33 +#define HTTP_TG_TAGS_NULL 34 +#define HTTP_TG_TAGS_SIZE_0 35 +#define HTTP_TG_TAGS_SIZE_LONG 36 +#define HTTP_TG_TAG_NULL 37 +#define HTTP_TG_TAG_NAME_NULL 38 +#define HTTP_TG_TAG_NAME_SIZE 39 +#define HTTP_TG_TAG_VALUE_TYPE 40 +#define HTTP_TG_TAG_VALUE_NULL 41 +#define HTTP_TG_TABLE_NULL 42 +#define HTTP_TG_TABLE_SIZE 43 +#define HTTP_TG_FIELDS_NULL 44 +#define HTTP_TG_FIELDS_SIZE_0 45 +#define HTTP_TG_FIELDS_SIZE_LONG 46 +#define HTTP_TG_FIELD_NULL 47 +#define HTTP_TG_FIELD_NAME_NULL 48 +#define HTTP_TG_FIELD_NAME_SIZE 49 +#define HTTP_TG_FIELD_VALUE_TYPE 50 +#define HTTP_TG_FIELD_VALUE_NULL 51 +#define HTTP_INVALID_BASIC_AUTH_TOKEN 52 +#define HTTP_INVALID_TAOSD_AUTH_TOKEN 53 +#define HTTP_TG_HOST_NOT_STRING 54 + +//grafana +#define HTTP_GC_QUERY_NULL 55 +#define HTTP_GC_QUERY_SIZE 56 + +//opentsdb +#define HTTP_OP_DB_NOT_INPUT 57 +#define HTTP_OP_DB_TOO_LONG 58 +#define HTTP_OP_INVALID_JSON 59 +#define HTTP_OP_METRICS_NULL 60 +#define HTTP_OP_METRICS_SIZE 61 +#define HTTP_OP_METRIC_NULL 62 +#define HTTP_OP_METRIC_TYPE 63 +#define HTTP_OP_METRIC_NAME_NULL 64 +#define HTTP_OP_METRIC_NAME_LONG 65 +#define HTTP_OP_TIMESTAMP_NULL 66 +#define HTTP_OP_TIMESTAMP_TYPE 67 +#define HTTP_OP_TIMESTAMP_VAL_NULL 68 +#define HTTP_OP_TAGS_NULL 69 +#define HTTP_OP_TAGS_SIZE_0 70 +#define HTTP_OP_TAGS_SIZE_LONG 71 +#define HTTP_OP_TAG_NULL 72 +#define HTTP_OP_TAG_NAME_NULL 73 +#define HTTP_OP_TAG_NAME_SIZE 74 +#define HTTP_OP_TAG_VALUE_TYPE 75 +#define HTTP_OP_TAG_VALUE_NULL 76 +#define HTTP_OP_TAG_VALUE_TOO_LONG 77 +#define HTTP_OP_VALUE_NULL 78 +#define HTTP_OP_VALUE_TYPE 79 + +//tgf +#define HTTP_TG_STABLE_NOT_EXIST 80 + +extern char *httpMsg[]; + +#endif \ No newline at end of file diff --git a/src/modules/http/inc/httpHandle.h b/src/modules/http/inc/httpHandle.h new file mode 100644 index 000000000000..cf316cd2448f --- /dev/null +++ b/src/modules/http/inc/httpHandle.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_HTTP_SERVER_H +#define TDENGINE_HTTP_SERVER_H + +#include +#include "pthread.h" +#include "semaphore.h" +#include "tmempool.h" +#include "tsdb.h" +#include "tutil.h" + +#include "http.h" +#include "httpJson.h" + +#define HTTP_MAX_CMD_SIZE 1024*20 +#define HTTP_MAX_BUFFER_SIZE 1024*1024*10 + +#define HTTP_LABEL_SIZE 8 +#define HTTP_MAX_EVENTS 10 +#define HTTP_BUFFER_SIZE 1024*100//100k +#define HTTP_STEP_SIZE 1024 //http message get process step by step +#define HTTP_MAX_URL 5 //http url stack size +#define HTTP_METHOD_SCANNER_SIZE 7 //http method fp size +#define HTTP_GC_TARGET_SIZE 128 + +#define HTTP_VERSION_10 0 +#define HTTP_VERSION_11 1 +//#define HTTP_VERSION_12 2 + +#define HTTP_UNCUNKED 0 +#define HTTP_CHUNKED 1 + +#define HTTP_KEEPALIVE_NO_INPUT 0 +#define HTTP_KEEPALIVE_ENABLE 1 +#define HTTP_KEEPALIVE_DISABLE 2 + +#define HTTP_REQTYPE_OTHERS 0 +#define HTTP_REQTYPE_LOGIN 1 +#define HTTP_REQTYPE_HEARTBEAT 2 +#define HTTP_REQTYPE_SINGLE_SQL 3 +#define HTTP_REQTYPE_MULTI_SQL 4 + +#define HTTP_CLOSE_CONN 0 +#define HTTP_KEEP_CONN 1 + +#define HTTP_PROCESS_ERROR 0 +#define HTTP_PROCESS_SUCCESS 1 + +struct HttpContext; +struct HttpThread; + +typedef struct { + void *signature; + int expire; + int access; + void *taos; + char id[TSDB_USER_LEN]; +} HttpSession; + +typedef enum { + HTTP_CMD_TYPE_UN_SPECIFIED, + HTTP_CMD_TYPE_CREATE_DB, + HTTP_CMD_TYPE_CREATE_STBALE, + HTTP_CMD_TYPE_INSERT +} HttpSqlCmdType; + +typedef enum { HTTP_CMD_STATE_NOT_RUN_YET, HTTP_CMD_STATE_RUN_FINISHED } HttpSqlCmdState; + +typedef enum { HTTP_CMD_RETURN_TYPE_WITH_RETURN, HTTP_CMD_RETURN_TYPE_NO_RETURN } HttpSqlCmdReturnType; + +typedef struct { + // used by single cmd + char * nativSql; + int32_t numOfRows; + int32_t code; + + // these are the locations in the buffer + int32_t tagNames[TSDB_MAX_TAGS]; + int32_t tagValues[TSDB_MAX_TAGS]; + int32_t timestamp; + int32_t metric; + int32_t stable; + int32_t table; + int32_t values; + int32_t sql; + + // used by multi-cmd + int8_t cmdType; + int8_t cmdReturnType; + int8_t cmdState; + int8_t tagNum; +} HttpSqlCmd; + +typedef struct { + HttpSqlCmd *cmds; + int16_t pos; + int16_t size; + int16_t maxSize; + int32_t bufferPos; + int32_t bufferSize; + char * buffer; +} HttpSqlCmds; + +typedef struct { + char *module; + bool (*decodeFp)(struct HttpContext *pContext); +} HttpDecodeMethod; + +typedef struct { + void (*startJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, void *result); + void (*stopJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd); + bool (*buildQueryJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, void *result, int numOfRows); + void (*buildAffectRowJsonFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, int affectRows); + void (*initJsonFp)(struct HttpContext *pContext); + void (*cleanJsonFp)(struct HttpContext *pContext); + bool (*checkFinishedFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, int code); + void (*setNextCmdFp)(struct HttpContext *pContext, HttpSqlCmd *cmd, int code); +} HttpEncodeMethod; + +typedef struct { + char * pos; + int32_t len; +} HttpBuf; + +typedef struct HttpContext { + void * signature; + int fd; + uint32_t accessTimes; + uint8_t httpVersion : 1; + uint8_t httpChunked : 
1; + uint8_t httpKeepAlive : 2; // http1.0 and not keep-alive, close connection immediately + uint8_t fromMemPool : 1; + uint8_t compress : 1; + uint8_t usedByEpoll : 1; + uint8_t usedByApp : 1; + uint8_t reqType; + char ipstr[22]; + char user[TSDB_USER_LEN]; // parsed from auth token or login message + char pass[TSDB_PASSWORD_LEN]; + void * taos; + HttpSession *session; + HttpEncodeMethod * encodeMethod; + HttpSqlCmd singleCmd; + HttpSqlCmds * multiCmds; + JsonBuf * jsonBuf; + pthread_mutex_t mutex; + struct HttpThread * pThread; + struct HttpContext *prev, *next; +} HttpContext; + +typedef struct { + char * buffer; + int bufsize; + char * pLast; + char * pCur; + HttpBuf method; + HttpBuf path[HTTP_MAX_URL]; // url: dbname/meter/query + HttpBuf data; // body content + HttpBuf token; // auth token + HttpDecodeMethod *pMethod; +} HttpParser; + +#define HTTP_MAX_FDS_LEN 65536 + +typedef struct HttpThread { + pthread_t thread; + HttpContext * pHead; + pthread_mutex_t threadMutex; + pthread_cond_t fdReady; + int pollFd; + int numOfFds; + int threadId; + char label[HTTP_LABEL_SIZE]; + char buffer[HTTP_BUFFER_SIZE]; // buffer to receive data + HttpParser parser; // parse from buffer + bool (*processData)(HttpContext *pContext); + struct _http_server_obj_ *pServer; // handle passed by upper layer during pServer initialization +} HttpThread; + +typedef struct _http_server_obj_ { + char label[HTTP_LABEL_SIZE]; + char serverIp[16]; + short serverPort; + int cacheContext; + int sessionExpire; + int numOfThreads; + HttpDecodeMethod *methodScanner[HTTP_METHOD_SCANNER_SIZE]; + int methodScannerLen; + pthread_mutex_t serverMutex; + void * pSessionHash; + void * pContextPool; + void * expireTimer; + HttpThread * pThreads; + pthread_t thread; + bool (*processData)(HttpContext *pContext); + int requestNum; + void *timerHandle; + bool online; +} HttpServer; + +// http util method +bool httpCheckUsedbSql(char *sql); +void httpTimeToString(time_t t, char *buf, int buflen); + +// http init method +void *httpInitServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle); +void httpCleanUpServer(HttpServer *pServer); + +// http server connection +void httpCleanUpConnect(HttpServer *pServer); +bool httpInitConnect(HttpServer *pServer); + +// http context for each client connection +HttpContext *httpCreateContext(HttpServer *pServer); +bool httpInitContext(HttpContext *pContext); +void httpCloseContextByApp(HttpContext *pContext); +void httpCloseContextByServer(HttpThread *pThread, HttpContext *pContext); + +// http session method +void httpCreateSession(HttpContext *pContext, void *taos); +void httpAccessSession(HttpContext *pContext); +void httpFetchSession(HttpContext *pContext); +void httpRestoreSession(HttpContext *pContext); +void httpRemoveExpireSessions(HttpServer *pServer); +bool httpInitAllSessions(HttpServer *pServer); +void httpRemoveAllSessions(HttpServer *pServer); +void httpProcessSessionExpire(void *handle, void *tmrId); + +// http request parser +void httpAddMethod(HttpServer *pServer, HttpDecodeMethod *pMethod); + +// http token method +bool httpParseBasicAuthToken(HttpContext *pContext, char *token, int len); +bool httpParseTaosdAuthToken(HttpContext *pContext, char *token, int len); +bool httpGenTaosdAuthToken(HttpContext *pContext, char *token, int maxLen); + +// util +bool httpUrlMatch(HttpContext *pContext, int pos, char *cmp); +bool httpProcessData(HttpContext *pContext); +bool httpReadDataImp(HttpContext *pContext); + +// http request handler +void 
httpProcessRequest(HttpContext *pContext); + +// http json printer +JsonBuf *httpMallocJsonBuf(HttpContext *pContext); +void httpFreeJsonBuf(HttpContext *pContext); + +// http multicmds util + +int32_t httpAddToSqlCmdBuffer(HttpContext *pContext, const char *const format, ...); +int32_t httpAddToSqlCmdBufferNoTerminal(HttpContext *pContext, const char *const format, ...); +int32_t httpAddToSqlCmdBufferWithSize(HttpContext *pContext, int mallocSize); +int32_t httpAddToSqlCmdBufferTerminal(HttpContext *pContext); + +bool httpMallocMultiCmds(HttpContext *pContext, int cmdSize, int bufferSize); +bool httpReMallocMultiCmdsSize(HttpContext *pContext, int cmdSize); +bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int bufferSize); +void httpFreeMultiCmds(HttpContext *pContext); + +HttpSqlCmd *httpNewSqlCmd(HttpContext *pContext); +HttpSqlCmd *httpCurrSqlCmd(HttpContext *pContext); +int httpCurSqlCmdPos(HttpContext *pContext); + +void httpTrimTableName(char *name); +int httpShrinkTableName(HttpContext *pContext, int pos, char *name); +char *httpGetCmdsString(HttpContext *pContext, int pos); + +extern const char *httpKeepAliveStr[]; +extern const char *httpVersionStr[]; + +#endif diff --git a/src/modules/http/inc/httpJson.h b/src/modules/http/inc/httpJson.h new file mode 100644 index 000000000000..77a995670b47 --- /dev/null +++ b/src/modules/http/inc/httpJson.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_HTTP_JSON_H +#define TDENGINE_HTTP_JSON_H + +#include + +#define JSON_BUFFER_SIZE 4096 +struct HttpContext; + +enum { JsonNumber, JsonString, JsonBoolean, JsonArray, JsonObject, JsonNull }; + +typedef enum { JsonCompress, JsonUnCompress } JsonCompressFlag; + +extern char JsonItmTkn; +extern char JsonObjStt; +extern char JsonObjEnd; +extern char JsonArrStt; +extern char JsonArrEnd; +extern char JsonStrStt; +extern char JsonStrEnd; +extern char JsonPairTkn; +extern char JsonNulTkn[]; +extern char JsonTrueTkn[]; +extern char JsonFalseTkn[]; + +typedef struct { + int size; + int total; + char* lst; + char buf[JSON_BUFFER_SIZE]; + struct HttpContext* pContext; +} JsonBuf; + +// http response +int httpWriteBuf(struct HttpContext* pContext, const char* buf, int sz); +int httpWriteBufByFd(int fd, const char* buf, int sz); + +// builder callback +typedef void (*httpJsonBuilder)(JsonBuf* buf, void* jsnHandle); + +// buffer +void httpInitJsonBuf(JsonBuf* buf, struct HttpContext* pContext); +void httpWriteJsonBufHead(JsonBuf* buf); +int httpWriteJsonBufBody(JsonBuf* buf); +void httpWriteJsonBufEnd(JsonBuf* buf); + +// value +void httpJsonString(JsonBuf* buf, char* sVal, int len); +void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int maxLen); +void httpJsonInt64(JsonBuf* buf, int64_t num); +void httpJsonTimestamp(JsonBuf* buf, int64_t t); +void httpJsonInt(JsonBuf* buf, int num); +void httpJsonFloat(JsonBuf* buf, float num); +void httpJsonDouble(JsonBuf* buf, double num); +void httpJsonNull(JsonBuf* buf); +void httpJsonBool(JsonBuf* buf, int val); + +// pair +void httpJsonPair(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen); +void httpJsonPairOriginString(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen); +void httpJsonPairHead(JsonBuf* buf, char* name, int len); +void httpJsonPairIntVal(JsonBuf* buf, char* name, int nNameLen, int num); +void httpJsonPairInt64Val(JsonBuf* buf, char* name, int nNameLen, int64_t num); +void httpJsonPairBoolVal(JsonBuf* buf, char* name, int nNameLen, int num); +void httpJsonPairFloatVal(JsonBuf* buf, char* name, int nNameLen, float num); +void httpJsonPairDoubleVal(JsonBuf* buf, char* name, int nNameLen, double num); +void httpJsonPairNullVal(JsonBuf* buf, char* name, int nNameLen); + +// object +void httpJsonPairArray(JsonBuf* buf, char* name, int nLen, httpJsonBuilder builder, void* dsHandle); +void httpJsonPairObject(JsonBuf* buf, char* name, int nLen, httpJsonBuilder builder, void* dsHandle); +void httpJsonObject(JsonBuf* buf, httpJsonBuilder fnBuilder, void* dsHandle); +void httpJsonArray(JsonBuf* buf, httpJsonBuilder fnBuidler, void* jsonHandle); + +// print +void httpJsonTestBuf(JsonBuf* buf, int safety); +void httpJsonToken(JsonBuf* buf, char c); +void httpJsonItemToken(JsonBuf* buf); +void httpJsonPrint(JsonBuf* buf, const char* json, int len); + +// quick +void httpJsonPairStatus(JsonBuf* buf, int code); + +#endif diff --git a/src/modules/http/inc/httpResp.h b/src/modules/http/inc/httpResp.h new file mode 100644 index 000000000000..d4a0a41b8576 --- /dev/null +++ b/src/modules/http/inc/httpResp.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_HTTP_RESP_H +#define TDENGINE_HTTP_RESP_H + +#include "httpHandle.h" + +enum _httpRespTempl { + HTTP_RESPONSE_JSON_OK, + HTTP_RESPONSE_JSON_ERROR, + HTTP_RESPONSE_OK, + HTTP_RESPONSE_ERROR, + HTTP_RESPONSE_CHUNKED_UN_COMPRESS, + HTTP_RESPONSE_CHUNKED_COMPRESS, + HTTP_RESPONSE_OPTIONS, + HTTP_RESPONSE_GRAFANA, + HTTP_RESP_END +}; + +extern const char *httpRespTemplate[]; + +void httpSendErrorResp(HttpContext *pContext, int errNo); +void httpSendErrorRespWithDesc(HttpContext *pContext, int errNo, char *desc); +void httpSendTaosdErrorResp(HttpContext *pContext, int errCode); +void httpSendSuccResp(HttpContext *pContext, char *desc); +void httpSendOptionResp(HttpContext *pContext, char *desc); + +#endif \ No newline at end of file diff --git a/src/modules/http/inc/httpSystem.h b/src/modules/http/inc/httpSystem.h new file mode 100644 index 000000000000..0a09c3f7629e --- /dev/null +++ b/src/modules/http/inc/httpSystem.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_HTTP_SYSTEM_H +#define TDENGINE_HTTP_SYSTEM_H + +int httpInitSystem(); +int httpStartSystem(); +void httpStopSystem(); +void httpCleanUpSystem(); +void httpGetReqCount(int32_t *httpConns); + +#endif diff --git a/src/modules/http/inc/restHandle.h b/src/modules/http/inc/restHandle.h new file mode 100644 index 000000000000..48ad040c5383 --- /dev/null +++ b/src/modules/http/inc/restHandle.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_REST_HANDLE_H +#define TDENGINE_REST_HANDLE_H + +#include +#include +#include +#include + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" + +#define REST_ROOT_URL_POS 0 +#define REST_ACTION_URL_POS 1 +#define REST_USER_URL_POS 2 +#define REST_PASS_URL_POS 3 + +void restInitHandle(HttpServer* pServer); +bool restProcessRequest(struct HttpContext* pContext); + +#endif \ No newline at end of file diff --git a/src/modules/http/inc/restJson.h b/src/modules/http/inc/restJson.h new file mode 100644 index 000000000000..109e9a33bfc2 --- /dev/null +++ b/src/modules/http/inc/restJson.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_REST_JSON_H +#define TDENGINE_REST_JSON_H +#include +#include "httpHandle.h" +#include "httpJson.h" +#include "taos.h" + +#define REST_JSON_SUCCESS "succ" +#define REST_JSON_SUCCESS_LEN 4 +#define REST_JSON_FAILURE "error" +#define REST_JSON_FAILURE_LEN 5 +#define REST_JSON_STATUS "status" +#define REST_JSON_STATUS_LEN 6 +#define REST_JSON_CODE "code" +#define REST_JSON_CODE_LEN 4 +#define REST_JSON_DESC "desc" +#define REST_JSON_DESC_LEN 4 +#define REST_JSON_DATA "data" +#define REST_JSON_DATA_LEN 4 +#define REST_JSON_HEAD "head" +#define REST_JSON_HEAD_LEN 4 +#define REST_JSON_ROWS "rows" +#define REST_JSON_ROWS_LEN 4 +#define REST_JSON_AFFECT_ROWS "affected_rows" +#define REST_JSON_AFFECT_ROWS_LEN 13 + +void restBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows); + +void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result); +bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows); +bool restBuildSqlTimeJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows); +void restStopSqlJson(HttpContext *pContext, HttpSqlCmd *cmd); + +#endif \ No newline at end of file diff --git a/src/modules/http/inc/tgHandle.h b/src/modules/http/inc/tgHandle.h new file mode 100644 index 000000000000..5b8c49900f33 --- /dev/null +++ b/src/modules/http/inc/tgHandle.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_TG_HANDLE_H +#define TDENGINE_TG_HANDLE_H + +#include +#include +#include +#include + +#include "cJSON.h" +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" + +#define TG_ROOT_URL_POS 0 +#define TG_DB_URL_POS 1 +#define TG_USER_URL_POS 2 +#define TG_PASS_URL_POS 3 + +void tgInitHandle(HttpServer *pServer); + +bool tgProcessRquest(struct HttpContext *pContext); + +#endif \ No newline at end of file diff --git a/src/modules/http/inc/tgJson.h b/src/modules/http/inc/tgJson.h new file mode 100644 index 000000000000..bf3aa093ae91 --- /dev/null +++ b/src/modules/http/inc/tgJson.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TG_JSON_H +#define TDENGINE_TG_JSON_H + +#include "httpHandle.h" +#include "httpJson.h" +#include "taos.h" + +void tgInitQueryJson(HttpContext *pContext); +void tgCleanQueryJson(HttpContext *pContext); +void tgStartQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result); +void tgStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd); +void tgBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows); +bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code); +void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int code); + +#endif \ No newline at end of file diff --git a/src/modules/http/src/cJSON.c b/src/modules/http/src/cJSON.c new file mode 100644 index 000000000000..1649161a252b --- /dev/null +++ b/src/modules/http/src/cJSON.c @@ -0,0 +1,2699 @@ +/* + Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +/* cJSON */ +/* JSON parser in C. 
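   A minimal round-trip sketch of the public API this file implements, using only declarations
   from the cJSON.h added earlier in this patch (<stdio.h> assumed; the sample document and its
   field names are arbitrary):

     cJSON *json = cJSON_Parse("{\"name\":\"taosd\",\"port\":6030}");
     if (json == NULL)
     {
         printf("parse failed near: %s\n", cJSON_GetErrorPtr());
     }
     else
     {
         cJSON *port = cJSON_GetObjectItem(json, "port");
         if (cJSON_IsNumber(port))
         {
             printf("port=%lld\n", (long long)port->valueint);
         }
         cJSON_Delete(json);
     }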
*/ + +#ifdef __GNUC__ +#pragma GCC visibility push(default) +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __GNUC__ +#pragma GCC visibility pop +#endif + +#include "cJSON.h" + +/* define our own boolean type */ +#define true ((cJSON_bool)1) +#define false ((cJSON_bool)0) + +typedef struct { + const unsigned char *json; + size_t position; +} error; +static error global_error = { NULL, 0 }; + +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) +{ + return (const char*) (global_error.json + global_error.position); +} + +/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ +#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 5) || (CJSON_VERSION_PATCH != 9) + #error cJSON.h and cJSON.c have different versions. Make sure that both have the same. +#endif + +CJSON_PUBLIC(const char*) cJSON_Version(void) +{ + static char version[15]; + sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); + + return version; +} + +/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ +static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) +{ + if ((string1 == NULL) || (string2 == NULL)) + { + return 1; + } + + if (string1 == string2) + { + return 0; + } + + for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++) + { + if (*string1 == '\0') + { + return 0; + } + } + + return tolower(*string1) - tolower(*string2); +} + +typedef struct internal_hooks +{ + void *(*allocate)(size_t size); + void (*deallocate)(void *pointer); + void *(*reallocate)(void *pointer, size_t size); +} internal_hooks; + +static internal_hooks global_hooks = { malloc, free, realloc }; + +static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) +{ + size_t length = 0; + unsigned char *copy = NULL; + + if (string == NULL) + { + return NULL; + } + + length = strlen((const char*)string) + sizeof(""); + if (!(copy = (unsigned char*)hooks->allocate(length))) + { + return NULL; + } + memcpy(copy, string, length); + + return copy; +} + +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) +{ + if (hooks == NULL) + { + /* Reset hooks */ + global_hooks.allocate = malloc; + global_hooks.deallocate = free; + global_hooks.reallocate = realloc; + return; + } + + global_hooks.allocate = malloc; + if (hooks->malloc_fn != NULL) + { + global_hooks.allocate = hooks->malloc_fn; + } + + global_hooks.deallocate = free; + if (hooks->free_fn != NULL) + { + global_hooks.deallocate = hooks->free_fn; + } + + /* use realloc only if both free and malloc are used */ + global_hooks.reallocate = NULL; + if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) + { + global_hooks.reallocate = realloc; + } +} + +/* Internal constructor. */ +static cJSON *cJSON_New_Item(const internal_hooks * const hooks) +{ + cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); + if (node) + { + memset(node, '\0', sizeof(cJSON)); + } + + return node; +} + +/* Delete a cJSON structure. 
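   As an aside on the hook mechanism implemented just above, a hedged sketch of installing
   custom allocators (my_malloc and my_free are placeholder names for any functions matching
   the malloc/free signatures):

     cJSON_Hooks hooks;
     hooks.malloc_fn = my_malloc;
     hooks.free_fn = my_free;
     cJSON_InitHooks(&hooks);    // cJSON_InitHooks(NULL) restores malloc/free/realloc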
*/ +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) +{ + cJSON *next = NULL; + while (item != NULL) + { + next = item->next; + if (!(item->type & cJSON_IsReference) && (item->child != NULL)) + { + cJSON_Delete(item->child); + } + if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) + { + global_hooks.deallocate(item->valuestring); + } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + global_hooks.deallocate(item->string); + } + global_hooks.deallocate(item); + item = next; + } +} + +/* get the decimal point character of the current locale */ +static unsigned char get_decimal_point(void) +{ + struct lconv *lconv = localeconv(); + return (unsigned char) lconv->decimal_point[0]; +} + +typedef struct +{ + const unsigned char *content; + size_t length; + size_t offset; + size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */ + internal_hooks hooks; +} parse_buffer; + +/* check if the given size is left to read in a given parse buffer (starting with 1) */ +#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) +#define cannot_read(buffer, size) (!can_read(buffer, size)) +/* check if the buffer can be accessed at the given index (starting with 0) */ +#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) +#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) +/* get a pointer to the buffer at the position */ +#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) + +/* Parse the input text to generate a number, and populate the result into item. */ +static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) +{ + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; + } + + /* copy the number into a temporary buffer and replace '.' 
with the decimal point + * of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) + { + switch (buffer_at_offset(input_buffer)[i]) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; + + default: + goto loop_end; + } + } +loop_end: + number_c_string[i] = '\0'; + + number = strtod((const char*)number_c_string, (char**)&after_end); + if (number_c_string == after_end) + { + return false; /* parse_error */ + } + + item->valuedouble = number; + + /* use saturation in case of overflow */ + if (number >= LLONG_MAX) + { + item->valueint = LLONG_MAX; + } + else if (number <= LLONG_MIN) + { + item->valueint = LLONG_MIN; + } + else + { + item->valueint = (int64_t)number; + } + + item->type = cJSON_Number; + + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; +} + +/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) +{ + if (number >= LLONG_MAX) + { + object->valueint = LLONG_MAX; + } + else if (number <= LLONG_MIN) + { + object->valueint = LLONG_MIN; + } + else + { + object->valueint = (int64_t)number; + } + + return object->valuedouble = number; +} + +typedef struct +{ + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks hooks; +} printbuffer; + +/* realloc printbuffer if necessary to have at least "needed" bytes more */ +static unsigned char* ensure(printbuffer * const p, size_t needed) +{ + unsigned char *newbuffer = NULL; + size_t newsize = 0; + + if ((p == NULL) || (p->buffer == NULL)) + { + return NULL; + } + + if ((p->length > 0) && (p->offset >= p->length)) + { + /* make sure that offset is valid */ + return NULL; + } + + if (needed > LLONG_MAX) + { + /* sizes bigger than LLONG_MAX are currently not supported */ + return NULL; + } + + needed += p->offset + 1; + if (needed <= p->length) + { + return p->buffer + p->offset; + } + + if (p->noalloc) { + return NULL; + } + + /* calculate new buffer size */ + if (needed > (LLONG_MAX / 2)) + { + /* overflow of int, use LLONG_MAX if possible */ + if (needed <= LLONG_MAX) + { + newsize = LLONG_MAX; + } + else + { + return NULL; + } + } + else + { + newsize = needed * 2; + } + + if (p->hooks.reallocate != NULL) + { + /* reallocate with realloc if available */ + newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } + else + { + /* otherwise reallocate manually */ + newbuffer = (unsigned char*)p->hooks.allocate(newsize); + if (!newbuffer) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + if (newbuffer) + { + memcpy(newbuffer, p->buffer, p->offset + 1); + } + p->hooks.deallocate(p->buffer); + } + p->length = newsize; + p->buffer = newbuffer; + + return newbuffer + p->offset; +} + +/* calculate the new length of the string in a printbuffer and update 
the offset */ +static void update_offset(printbuffer * const buffer) +{ + const unsigned char *buffer_pointer = NULL; + if ((buffer == NULL) || (buffer->buffer == NULL)) + { + return; + } + buffer_pointer = buffer->buffer + buffer->offset; + + buffer->offset += strlen((const char*)buffer_pointer); +} + +/* Render the number nicely from the given item into a string. */ +static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26]; /* temporary buffer to print the number into */ + unsigned char decimal_point = get_decimal_point(); + double test; + + if (output_buffer == NULL) + { + return false; + } + + /* This checks for NaN and Infinity */ + if ((d * 0) != 0) + { + length = sprintf((char*)number_buffer, "null"); + } + else + { + /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ + length = sprintf((char*)number_buffer, "%1.15g", d); + + /* Check whether the original double can be recovered */ + if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || ((double)test != d)) + { + /* If not, print with 17 decimal places of precision */ + length = sprintf((char*)number_buffer, "%1.17g", d); + } + } + + /* sprintf failed or buffer overrun occured */ + if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) + { + return false; + } + + /* reserve appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length); + if (output_pointer == NULL) + { + return false; + } + + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' */ + for (i = 0; i < ((size_t)length); i++) + { + if (number_buffer[i] == decimal_point) + { + output_pointer[i] = '.'; + continue; + } + + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; +} + +/* parse 4 digit hexadecimal number */ +static unsigned parse_hex4(const unsigned char * const input) +{ + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) + { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) + { + h += (unsigned int) input[i] - '0'; + } + else if ((input[i] >= 'A') && (input[i] <= 'F')) + { + h += (unsigned int) 10 + input[i] - 'A'; + } + else if ((input[i] >= 'a') && (input[i] <= 'f')) + { + h += (unsigned int) 10 + input[i] - 'a'; + } + else /* invalid */ + { + return 0; + } + + if (i < 3) + { + /* shift left to make place for the next nibble */ + h = h << 4; + } + } + + return h; +} + +/* converts a UTF-16 literal to UTF-8 + * A literal can be one or two sequences of the form \uXXXX */ +static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) +{ + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); + + /* check that the code is valid */ + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) + { + goto fail; + } + + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) 
+ { + const unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ + + if ((input_end - second_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) + { + /* missing second half of the surrogate pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) + { + /* invalid second half of the surrogate pair */ + goto fail; + } + + + /* calculate the unicode codepoint from the surrogate pair */ + codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); + } + else + { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) + { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } + else if (codepoint < 0x800) + { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } + else if (codepoint < 0x10000) + { + /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } + else if (codepoint <= 0x10FFFF) + { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } + else + { + /* invalid unicode codepoint */ + goto fail; + } + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) + { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) + { + (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } + else + { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); + } + + *output_pointer += utf8_length; + + return sequence_length; + +fail: + return 0; +} + +/* Parse the input text into an unescaped cinput, and populate item. 
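   A worked example of the surrogate-pair arithmetic in utf16_literal_to_utf8 above: the escape
   \uD83D\uDE00 yields first_code 0xD83D and second_code 0xDE00, so
   codepoint = 0x10000 + ((0x03D << 10) | 0x200) = 0x1F600, which is then emitted as the four
   UTF-8 bytes F0 9F 98 80.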
*/ +static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) +{ + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') + { + goto fail; + } + + { + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) + { + /* is escape sequence */ + if (input_end[0] == '\\') + { + if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) + { + /* prevent buffer overflow when last input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) + { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes; + output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); + if (output == NULL) + { + goto fail; /* allocation failure */ + } + } + + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) + { + if (*input_pointer != '\\') + { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else + { + unsigned char sequence_length = 2; + if ((input_end - input_pointer) < 1) + { + goto fail; + } + + switch (input_pointer[1]) + { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case '\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); + if (sequence_length == 0) + { + /* failed to convert UTF16-literal to UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } + } + + /* zero terminate the output */ + *output_pointer = '\0'; + + item->type = cJSON_String; + item->valuestring = (char*)output; + + input_buffer->offset = (size_t) (input_end - input_buffer->content); + input_buffer->offset++; + + return true; + +fail: + if (output != NULL) + { + input_buffer->hooks.deallocate(output); + } + + if (input_pointer != NULL) + { + input_buffer->offset = (size_t)(input_pointer - input_buffer->content); + } + + return false; +} + +/* Render the cstring provided to an escaped version that can be printed. 
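   For instance, the escaping rules below turn a double quote into \" , a backslash into \\ ,
   a newline into \n , and any other byte below 0x20 that has no short escape into a \u00XX
   sequence, with the whole result wrapped in double quotes.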
*/ +static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) +{ + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) + { + return false; + } + + /* empty string */ + if (input == NULL) + { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "\"\""); + + return true; + } + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) + { + switch (*input_pointer) + { + case '\"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + /* one character escape sequence */ + escape_characters++; + break; + default: + if (*input_pointer < 32) + { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) + { + return false; + } + + /* no characters have to be escaped */ + if (escape_characters == 0) + { + output[0] = '\"'; + memcpy(output + 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + + output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) + { + if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) + { + /* normal character, copy */ + *output_pointer = *input_pointer; + } + else + { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) + { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + sprintf((char*)output_pointer, "u%04x", *input_pointer); + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; +} + +/* Invoke print_string_ptr (which is useful) on an item. */ +static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) +{ + return print_string_ptr((unsigned char*)item->valuestring, p); +} + +/* Predeclare these prototypes. 
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); + +/* Utility to jump whitespace and cr/lf */ +static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL)) + { + return NULL; + } + + while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) + { + buffer->offset++; + } + + if (buffer->offset == buffer->length) + { + buffer->offset--; + } + + return buffer; +} + +/* Parse an object - create a new root, and populate. */ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) +{ + parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } }; + cJSON *item = NULL; + + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; + + if (value == NULL) + { + goto fail; + } + + buffer.content = (const unsigned char*)value; + buffer.length = strlen((const char*)value) + sizeof(""); + buffer.offset = 0; + buffer.hooks = global_hooks; + + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } + + if (!parse_value(item, buffer_skip_whitespace(&buffer))) + { + /* parse failure. ep is set. */ + goto fail; + } + + /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ + if (require_null_terminated) + { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') + { + goto fail; + } + } + if (return_parse_end) + { + *return_parse_end = (const char*)buffer_at_offset(&buffer); + } + + return item; + +fail: + if (item != NULL) + { + cJSON_Delete(item); + } + + if (value != NULL) + { + error local_error; + local_error.json = (const unsigned char*)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) + { + local_error.position = buffer.offset; + } + else if (buffer.length > 0) + { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) + { + *return_parse_end = (const char*)local_error.json + local_error.position; + } + + global_error = local_error; + } + + return NULL; +} + +/* Default options for cJSON_Parse */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) +{ + return cJSON_ParseWithOpts(value, 0, 0); +} + +#define cjson_min(a, b) ((a < b) ? 
a : b) + +static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) +{ + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char*) hooks->allocate(256); + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) + { + goto fail; + } + + /* print the value */ + if (!print_value(item, buffer)) + { + goto fail; + } + update_offset(buffer); + + /* check if reallocate is available */ + if (hooks->reallocate != NULL) + { + printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->length); + buffer->buffer = NULL; + if (printed == NULL) { + goto fail; + } + } + else /* otherwise copy the JSON over to a new buffer */ + { + printed = (unsigned char*) hooks->allocate(buffer->offset + 1); + if (printed == NULL) + { + goto fail; + } + memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; + +fail: + if (buffer->buffer != NULL) + { + hooks->deallocate(buffer->buffer); + } + + if (printed != NULL) + { + hooks->deallocate(printed); + } + + return NULL; +} + +/* Render a cJSON item/entity/structure to text. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) +{ + return (char*)print(item, true, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) +{ + return (char*)print(item, false, &global_hooks); +} + +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if (prebuffer < 0) + { + return NULL; + } + + p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) + { + return NULL; + } + + p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; + + if (!print_value(item, &p)) + { + global_hooks.deallocate(p.buffer); + return NULL; + } + + return (char*)p.buffer; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buf, const int len, const cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } }; + + if ((len < 0) || (buf == NULL)) + { + return false; + } + + p.buffer = (unsigned char*)buf; + p.length = (size_t)len; + p.offset = 0; + p.noalloc = true; + p.format = fmt; + p.hooks = global_hooks; + + return print_value(item, &p); +} + +/* Parser core - when encountering text, process appropriately. 
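   Before the parser core below, a hedged sketch of the fixed-buffer printer defined just above
   (item stands for any previously built cJSON node, <stdio.h> is assumed for puts, and the
   64-byte size is arbitrary):

     char out[64];
     if (cJSON_PrintPreallocated(item, out, (int)sizeof(out), 0))
     {
         puts(out);    // unformatted JSON; the call returns 0 if the buffer is too small
     }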
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) +{ + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) + { + item->type = cJSON_NULL; + input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) + { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) + { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) + { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) + { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) + { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) + { + return parse_object(item, input_buffer); + } + + + return false; +} + +/* Render a value to text. */ +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output = NULL; + + if ((item == NULL) || (output_buffer == NULL)) + { + return false; + } + + switch ((item->type) & 0xFF) + { + case cJSON_NULL: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "null"); + return true; + + case cJSON_False: + output = ensure(output_buffer, 6); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "false"); + return true; + + case cJSON_True: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "true"); + return true; + + case cJSON_Number: + return print_number(item, output_buffer); + + case cJSON_Raw: + { + size_t raw_length = 0; + if (item->valuestring == NULL) + { + if (!output_buffer->noalloc) + { + output_buffer->hooks.deallocate(output_buffer->buffer); + } + return false; + } + + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) + { + return false; + } + memcpy(output, item->valuestring, raw_length); + return true; + } + + case cJSON_String: + return print_string(item, output_buffer); + + case cJSON_Array: + return print_array(item, output_buffer); + + case cJSON_Object: + return print_object(item, output_buffer); + + default: + return false; + } +} + +/* Build an array from input text. 
*/ +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* head of the linked list */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (buffer_at_offset(input_buffer)[0] != '[') + { + /* not an array */ + goto fail; + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']')) + { + /* empty array */ + goto success; + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse next value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']') + { + goto fail; /* expected end of array */ + } + +success: + input_buffer->depth--; + + item->type = cJSON_Array; + item->child = head; + + input_buffer->offset++; + + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an array to text */ +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_element = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output array. */ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) + { + if (!print_value(current_element, output_buffer)) + { + return false; + } + update_offset(output_buffer); + if (current_element->next) + { + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ','; + if(output_buffer->format) + { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; + } + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Build an object from the text. 
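+   parse_object() works like parse_array(), but every element is a "name": value
+   pair: the name is read with parse_string() and moved from valuestring into the
+   node's string field, a ':' must follow, and only then is the value itself parsed.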
*/ +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer) +{ + cJSON *head = NULL; /* linked list head */ + cJSON *current_item = NULL; + + if (input_buffer->depth >= CJSON_NESTING_LIMIT) + { + return false; /* to deeply nested */ + } + input_buffer->depth++; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{')) + { + goto fail; /* not an object */ + } + + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}')) + { + goto success; /* empty object */ + } + + /* check if we skipped to the end of the buffer */ + if (cannot_access_at_index(input_buffer, 0)) + { + input_buffer->offset--; + goto fail; + } + + /* step back to character in front of the first element */ + input_buffer->offset--; + /* loop through the comma separated array elements */ + do + { + /* allocate next item */ + cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks)); + if (new_item == NULL) + { + goto fail; /* allocation failure */ + } + + /* attach next item to list */ + if (head == NULL) + { + /* start the linked list */ + current_item = head = new_item; + } + else + { + /* add to the end and advance */ + current_item->next = new_item; + new_item->prev = current_item; + current_item = new_item; + } + + /* parse the name of the child */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_string(current_item, input_buffer)) + { + goto fail; /* faile to parse name */ + } + buffer_skip_whitespace(input_buffer); + + /* swap valuestring and string, because we parsed the name */ + current_item->string = current_item->valuestring; + current_item->valuestring = NULL; + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) + { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } + while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) + { + goto fail; /* expected end of object */ + } + +success: + input_buffer->depth--; + + item->type = cJSON_Object; + item->child = head; + + input_buffer->offset++; + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; +} + +/* Render an object to text. */ +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + size_t length = 0; + cJSON *current_item = item->child; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output: */ + length = (size_t) (output_buffer->format ? 
2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + output_buffer->offset += length; + + while (current_item) + { + if (output_buffer->format) + { + size_t i; + output_pointer = ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) + { + return false; + } + for (i = 0; i < output_buffer->depth; i++) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } + + /* print key */ + if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + length = (size_t) (output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; + + /* print value */ + if (!print_value(current_item, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = (size_t) ((output_buffer->format ? 1 : 0) + (current_item->next ? 1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + if (current_item->next) + { + *output_pointer++ = ','; + } + + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; + } + + output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2); + if (output_pointer == NULL) + { + return false; + } + if (output_buffer->format) + { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) + { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; +} + +/* Get Array size/item / object item. */ +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) +{ + cJSON *child = NULL; + size_t size = 0; + + if (array == NULL) + { + return 0; + } + + child = array->child; + + while(child != NULL) + { + size++; + child = child->next; + } + + /* FIXME: Can overflow here. 
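+       (the count is kept in a size_t while the public return type is int, so a
+       size above INT_MAX would overflow the cast below.)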
Cannot be fixed without breaking the API */ + + return (int)size; +} + +static cJSON* get_array_item(const cJSON *array, size_t index) +{ + cJSON *current_child = NULL; + + if (array == NULL) + { + return NULL; + } + + current_child = array->child; + while ((current_child != NULL) && (index > 0)) + { + index--; + current_child = current_child->next; + } + + return current_child; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) +{ + if (index < 0) + { + return NULL; + } + + return get_array_item(array, (size_t)index); +} + +static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) +{ + cJSON *current_element = NULL; + + if ((object == NULL) || (name == NULL)) + { + return NULL; + } + + current_element = object->child; + if (case_sensitive) + { + while ((current_element != NULL) && (strcmp(name, current_element->string) != 0)) + { + current_element = current_element->next; + } + } + else + { + while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) + { + current_element = current_element->next; + } + } + + return current_element; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, false); +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, true); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) +{ + return cJSON_GetObjectItem(object, string) ? 1 : 0; +} + +/* Utility for array list handling. */ +static void suffix_object(cJSON *prev, cJSON *item) +{ + prev->next = item; + item->prev = prev; +} + +/* Utility for handling references. */ +static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) +{ + cJSON *reference = NULL; + if (item == NULL) + { + return NULL; + } + + reference = cJSON_New_Item(hooks); + if (reference == NULL) + { + return NULL; + } + + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; +} + +/* Add item to array/object. 
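+   New items are appended to the tail of the parent's child list via suffix_object().
+   cJSON_AddItemToObject() duplicates the key with cJSON_strdup(), while the ...ObjectCS
+   variant keeps the caller's constant string and marks it with cJSON_StringIsConst.
+   Illustrative usage only (hypothetical names, not part of this file):
+     cJSON *root = cJSON_CreateObject();
+     cJSON *vals = cJSON_CreateArray();
+     cJSON_AddItemToObject(root, "datapoints", vals);
+     cJSON_AddItemToArray(vals, cJSON_CreateNumber(339.386719));
+     char *text = cJSON_PrintUnformatted(root);  // roughly {"datapoints":[339.386719]}
+     cJSON_free(text);
+     cJSON_Delete(root);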
*/ +CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item) +{ + cJSON *child = NULL; + + if ((item == NULL) || (array == NULL)) + { + return; + } + + child = array->child; + + if (child == NULL) + { + /* list is empty, start new one */ + array->child = item; + } + else + { + /* append to the end */ + while (child->next) + { + child = child->next; + } + suffix_object(child, item); + } +} + +CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) +{ + if (item == NULL) + { + return; + } + + /* call cJSON_AddItemToObjectCS for code reuse */ + cJSON_AddItemToObjectCS(object, (char*)cJSON_strdup((const unsigned char*)string, &global_hooks), item); + /* remove cJSON_StringIsConst flag */ + item->type &= ~cJSON_StringIsConst; +} + +#if defined (__clang__) || ((__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic push +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +/* Add an item to an object with constant string as key */ +CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) +{ + if ((item == NULL) || (string == NULL)) + { + return; + } + if (!(item->type & cJSON_StringIsConst) && item->string) + { + global_hooks.deallocate(item->string); + } + item->string = (char*)string; + item->type |= cJSON_StringIsConst; + cJSON_AddItemToArray(object, item); +} +#if defined (__clang__) || ((__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) + #pragma GCC diagnostic pop +#endif + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) +{ + if (array == NULL) + { + return; + } + + cJSON_AddItemToArray(array, create_reference(item, &global_hooks)); +} + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) +{ + if ((object == NULL) || (string == NULL)) + { + return; + } + + cJSON_AddItemToObject(object, string, create_reference(item, &global_hooks)); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) +{ + if ((parent == NULL) || (item == NULL)) + { + return NULL; + } + + if (item->prev != NULL) + { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) + { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) + { + /* first element */ + parent->child = item->next; + } + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) +{ + if (which < 0) + { + return NULL; + } + + return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) +{ + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItem(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); +} + +CJSON_PUBLIC(void) 
cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); +} + +/* Replace array/object items with new ones. */ +CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) +{ + cJSON *after_inserted = NULL; + + if (which < 0) + { + return; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) + { + cJSON_AddItemToArray(array, newitem); + return; + } + + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + after_inserted->prev = newitem; + if (after_inserted == array->child) + { + array->child = newitem; + } + else + { + newitem->prev->next = newitem; + } +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement) +{ + if ((parent == NULL) || (replacement == NULL) || (item == NULL)) + { + return false; + } + + if (replacement == item) + { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) + { + replacement->next->prev = replacement; + } + if (replacement->prev != NULL) + { + replacement->prev->next = replacement; + } + if (parent->child == item) + { + parent->child = replacement; + } + + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) +{ + if (which < 0) + { + return; + } + + cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); +} + +static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) +{ + if ((replacement == NULL) || (string == NULL)) + { + return false; + } + + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) + { + cJSON_free(replacement->string); + } + replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + replacement->type &= ~cJSON_StringIsConst; + + cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, false); +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, true); +} + +/* Create basic types: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_NULL; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_True; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool b) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = b ? 
cJSON_True : cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Number; + item->valuedouble = num; + + /* use saturation in case of overflow */ + if (num >= LLONG_MAX) + { + item->valueint = LLONG_MAX; + } + else if (num <= LLONG_MIN) + { + item->valueint = LLONG_MIN; + } + else + { + item->valueint = (int64_t)num; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_String; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type = cJSON_Raw; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); + if(!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if(item) + { + item->type=cJSON_Array; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Object; + } + + return item; +} + +/* Create Arrays: */ +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber((double)numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for(i = 0;a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (strings == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateString(strings[i]); + if(!n) + { + cJSON_Delete(a); + return NULL; + } + if(!i) + { + a->child = n; + } + else + { + suffix_object(p,n); + } + p = n; + } + + return a; +} + +/* Duplication */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool 
recurse) +{ + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) + { + goto fail; + } + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) + { + goto fail; + } + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble = item->valuedouble; + if (item->valuestring) + { + newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); + if (!newitem->valuestring) + { + goto fail; + } + } + if (item->string) + { + newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); + if (!newitem->string) + { + goto fail; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) + { + return newitem; + } + /* Walk the ->next chain for the child. */ + child = item->child; + while (child != NULL) + { + newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) + { + goto fail; + } + if (next != NULL) + { + /* If newitem->child already set, then crosswire ->prev and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } + else + { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + + return newitem; + +fail: + if (newitem != NULL) + { + cJSON_Delete(newitem); + } + + return NULL; +} + +CJSON_PUBLIC(void) cJSON_Minify(char *json) +{ + unsigned char *into = (unsigned char*)json; + + if (json == NULL) + { + return; + } + + while (*json) + { + if (*json == ' ') + { + json++; + } + else if (*json == '\t') + { + /* Whitespace characters. */ + json++; + } + else if (*json == '\r') + { + json++; + } + else if (*json=='\n') + { + json++; + } + else if ((*json == '/') && (json[1] == '/')) + { + /* double-slash comments, to end of line. */ + while (*json && (*json != '\n')) + { + json++; + } + } + else if ((*json == '/') && (json[1] == '*')) + { + /* multiline comments. */ + while (*json && !((*json == '*') && (json[1] == '/'))) + { + json++; + } + json += 2; + } + else if (*json == '\"') + { + /* string literals, which are \" sensitive. */ + *into++ = (unsigned char)*json++; + while (*json && (*json != '\"')) + { + if (*json == '\\') + { + *into++ = (unsigned char)*json++; + } + *into++ = (unsigned char)*json++; + } + *into++ = (unsigned char)*json++; + } + else + { + /* All other characters. */ + *into++ = (unsigned char)*json++; + } + } + + /* and null-terminate. 
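+       Minification is done in place through the 'into' write pointer: whitespace
+       and comments are dropped while string literals are copied verbatim, so the
+       result is never longer than the input buffer.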
*/ + *into = '\0'; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Invalid; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_False; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xff) == cJSON_True; +} + + +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & (cJSON_True | cJSON_False)) != 0; +} +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_NULL; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Number; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_String; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Array; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Object; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Raw; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) +{ + if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) + { + return false; + } + + /* check if type is valid */ + switch (a->type & 0xFF) + { + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + case cJSON_Number: + case cJSON_String: + case cJSON_Raw: + case cJSON_Array: + case cJSON_Object: + break; + + default: + return false; + } + + /* identical objects are equal */ + if (a == b) + { + return true; + } + + switch (a->type & 0xFF) + { + /* in these cases and equal type is enough */ + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + return true; + + case cJSON_Number: + if (a->valuedouble == b->valuedouble) + { + return true; + } + return false; + + case cJSON_String: + case cJSON_Raw: + if ((a->valuestring == NULL) || (b->valuestring == NULL)) + { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) + { + return true; + } + + return false; + + case cJSON_Array: + { + cJSON *a_element = a->child; + cJSON *b_element = b->child; + + for (; (a_element != NULL) && (b_element != NULL);) + { + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; + } + + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; + } + + return true; + } + + case cJSON_Object: + { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) + { + /* TODO This has O(n^2) runtime, which is horrible! 
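+             (for every element of a, get_object_item() linearly scans b, and the
+             second loop below repeats the scan from b to a so that a strict subset
+             of b never compares equal)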
*/ + b_element = get_object_item(b, a_element->string, case_sensitive); + if (b_element == NULL) + { + return false; + } + + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + } + + /* doing this twice, once on a and b to prevent true comparison if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) + { + a_element = get_object_item(a, b_element->string, case_sensitive); + if (a_element == NULL) + { + return false; + } + + if (!cJSON_Compare(b_element, a_element, case_sensitive)) + { + return false; + } + } + + return true; + } + + default: + return false; + } +} + +CJSON_PUBLIC(void *) cJSON_malloc(size_t size) +{ + return global_hooks.allocate(size); +} + +CJSON_PUBLIC(void) cJSON_free(void *object) +{ + global_hooks.deallocate(object); +} diff --git a/src/modules/http/src/gcHandle.c b/src/modules/http/src/gcHandle.c new file mode 100644 index 000000000000..fedce0b1075e --- /dev/null +++ b/src/modules/http/src/gcHandle.c @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "gcHandle.h" +#include "cJSON.h" +#include "gcJson.h" +#include "tsdb.h" + +static HttpDecodeMethod gcDecodeMethod = {"grafana", gcProcessRequest}; +static HttpEncodeMethod gcHeartBeatMethod = {NULL, gcSendHeartBeatResp, NULL, NULL, NULL, NULL, NULL, NULL}; +static HttpEncodeMethod gcQueryMethod = { + NULL, gcStopQueryJson, gcBuildQueryJson, NULL, gcInitQueryJson, gcCleanQueryJson, NULL, NULL}; + +void gcInitHandle(HttpServer* pServer) { httpAddMethod(pServer, &gcDecodeMethod); } + +bool gcGetUserFromUrl(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + if (pParser->path[GC_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[GC_USER_URL_POS].len <= 0) { + return false; + } + + strcpy(pContext->user, pParser->path[GC_USER_URL_POS].pos); + return true; +} + +bool gcGetPassFromUrl(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + if (pParser->path[GC_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[GC_PASS_URL_POS].len <= 0) { + return false; + } + + strcpy(pContext->pass, pParser->path[GC_PASS_URL_POS].pos); + return true; +} + +bool gcProcessLoginRequest(HttpContext* pContext) { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process grafana login msg", pContext, pContext->fd, pContext->ipstr, + pContext->user); + pContext->reqType = HTTP_REQTYPE_LOGIN; + return true; +} + +/** + * Process the query request + * @param fd for http send back + * @param context is taos conn + * @param filter, the request format is json, such as + */ + +// https://github.com/grafana/grafana/blob/master/docs/sources/plugins/developing/datasources.md +// input +//[{ +// "refId": "A", +// "alias" : "taosd", +// "sql" : "select first(taosd) from sys.mem where ts > now-6h and ts < now +// interval(20000a)" +//}, +//{ +// "refId": "B", +// "alias" : "system", +// "sql" : "select first(taosd) from sys.mem where ts > now-6h and 
ts < now +// interval(20000a)" +//}] +// output +//[{ +// "datapoints": [[339.386719, +// 1537873132000], +// [339.656250, +// 1537873162400], +// [339.656250, +// 1537873192600], +// [339.656250, +// 1537873222800], +// [339.589844, +// 1537873253200], +// [339.964844, +// 1537873283400], +// [340.093750, +// 1537873313800], +// [340.093750, +// 1537873344000], +// [340.093750, +// 1537873374200], +// [340.093750, +// 1537873404600]], +// "refId": "A", +// "target" : "taosd" +//}, +//{ +// "datapoints": [[339.386719, +// 1537873132000], +// [339.656250, +// 1537873162400], +// [339.656250, +// 1537873192600], +// [339.656250, +// 1537873222800], +// [339.589844, +// 1537873253200], +// [339.964844, +// 1537873283400], +// [340.093750, +// 1537873313800], +// [340.093750, +// 1537873344000], +// [340.093750, +// 1537873374200], +// [340.093750, +// 1537873404600]], +// "refId": "B", +// "target" : "system" +//}] + +bool gcProcessQueryRequest(HttpContext* pContext) { + httpTrace("context:%p, fd:%d, ip:%s, process grafana query msg", pContext, pContext->fd, pContext->ipstr); + + HttpParser* pParser = &pContext->pThread->parser; + char* filter = pParser->data.pos; + if (filter == NULL) { + httpSendErrorResp(pContext, HTTP_NO_MSG_INPUT); + return false; + } + + cJSON* root = cJSON_Parse(filter); + if (root == NULL) { + httpSendErrorResp(pContext, HTTP_PARSE_GC_REQ_ERROR); + return false; + } + + int size = cJSON_GetArraySize(root); + if (size <= 0) { + httpSendErrorResp(pContext, HTTP_GC_QUERY_NULL); + cJSON_Delete(root); + return false; + } + + if (size > 100) { + httpSendErrorResp(pContext, HTTP_GC_QUERY_SIZE); + cJSON_Delete(root); + return false; + } + + if (!httpMallocMultiCmds(pContext, size, HTTP_BUFFER_SIZE)) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + cJSON_Delete(root); + return false; + } + + for (int i = 0; i < size; ++i) { + cJSON* query = cJSON_GetArrayItem(root, i); + if (query == NULL) continue; + + cJSON* refId = cJSON_GetObjectItem(query, "refId"); + if (refId == NULL || refId->valuestring == NULL || strlen(refId->valuestring) == 0) { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, refId is null", pContext, pContext->fd, pContext->ipstr, + pContext->user); + continue; + } + + int refIdBuffer = httpAddToSqlCmdBuffer(pContext, refId->valuestring); + if (refIdBuffer == -1) { + httpWarn("context:%p, fd:%d, ip:%s, user:%s, refId buffer is full", pContext, pContext->fd, pContext->ipstr, + pContext->user); + break; + } + + cJSON* alias = cJSON_GetObjectItem(query, "alias"); + int aliasBuffer = -1; + if (!(alias == NULL || alias->valuestring == NULL || strlen(alias->valuestring) == 0)) { + aliasBuffer = httpAddToSqlCmdBuffer(pContext, alias->valuestring); + if (aliasBuffer == -1) { + httpWarn("context:%p, fd:%d, ip:%s, user:%s, alias buffer is full", pContext, pContext->fd, pContext->ipstr, + pContext->user); + break; + } + } + if (aliasBuffer == -1) { + aliasBuffer = httpAddToSqlCmdBuffer(pContext, ""); + } + + cJSON* sql = cJSON_GetObjectItem(query, "sql"); + if (sql == NULL || sql->valuestring == NULL || strlen(sql->valuestring) == 0) { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, sql is null", pContext, pContext->fd, pContext->ipstr, + pContext->user); + continue; + } + + int sqlBuffer = httpAddToSqlCmdBuffer(pContext, sql->valuestring); + if (sqlBuffer == -1) { + httpWarn("context:%p, fd:%d, ip:%s, user:%s, sql buffer is full", pContext, pContext->fd, pContext->ipstr, + pContext->user); + break; + } + + HttpSqlCmd* cmd = httpNewSqlCmd(pContext); + if (cmd == 
NULL) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + cJSON_Delete(root); + return false; + } + + cmd->sql = sqlBuffer; + cmd->values = refIdBuffer; + cmd->table = aliasBuffer; + cmd->numOfRows = 0; // hack way as target flags + cmd->timestamp = httpAddToSqlCmdBufferWithSize(pContext, HTTP_GC_TARGET_SIZE + 1); // hack way + + if (cmd->timestamp == -1) { + httpWarn("context:%p, fd:%d, ip:%s, user:%s, cant't malloc target size, sql buffer is full", + pContext, pContext->fd, pContext->ipstr, pContext->user); + break; + } + } + + pContext->reqType = HTTP_REQTYPE_MULTI_SQL; + pContext->encodeMethod = &gcQueryMethod; + pContext->multiCmds->pos = 0; + + return true; +} + +bool gcProcessHeartbeatRequest(HttpContext* pContext) { + httpTrace("context:%p, fd:%d, ip:%s, process grafana heartbeat msg", pContext, pContext->fd, pContext->ipstr); + pContext->reqType = HTTP_REQTYPE_HEARTBEAT; + pContext->encodeMethod = &gcHeartBeatMethod; + return true; +} + +/** + * Process get/post/options msg, such as login and logout + */ +bool gcProcessRequest(struct HttpContext* pContext) { + if (httpUrlMatch(pContext, GC_ACTION_URL_POS, "login")) { + gcGetUserFromUrl(pContext); + gcGetPassFromUrl(pContext); + } + + if (strlen(pContext->user) == 0 || strlen(pContext->pass) == 0) { + httpSendErrorResp(pContext, HTTP_PARSE_USR_ERROR); + return false; + } + + if (httpUrlMatch(pContext, GC_ACTION_URL_POS, "query")) { + return gcProcessQueryRequest(pContext); + } else if (httpUrlMatch(pContext, GC_ACTION_URL_POS, "heartbeat")) { + return gcProcessHeartbeatRequest(pContext); + } else if (httpUrlMatch(pContext, GC_ACTION_URL_POS, "login")) { + return gcProcessLoginRequest(pContext); + } else { + return gcProcessHeartbeatRequest(pContext); + } +} diff --git a/src/modules/http/src/gcJson.c b/src/modules/http/src/gcJson.c new file mode 100644 index 000000000000..f8e178c157d6 --- /dev/null +++ b/src/modules/http/src/gcJson.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include "gcHandle.h" +#include "gcJson.h" +#include "httpJson.h" +#include "httpResp.h" + +unsigned char *base64_decode(const char *value, int inlen, int *outlen); + +void gcInitQueryJson(HttpContext *pContext) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + httpInitJsonBuf(jsonBuf, pContext); + httpWriteJsonBufHead(jsonBuf); + + // data array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); +} + +void gcCleanQueryJson(HttpContext *pContext) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // array end + httpJsonToken(jsonBuf, JsonArrEnd); + + httpWriteJsonBufEnd(jsonBuf); +} + +void gcWriteTargetStartJson(JsonBuf *jsonBuf, char *refId, char *target) { + if (strlen(target) == 0) { + target = refId; + } + + // object begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonObjStt); + + // target section + httpJsonPair(jsonBuf, "refId", 5, refId, (int)strlen(refId)); + httpJsonPair(jsonBuf, "target", 6, target, (int)strlen(target)); + + // data begin + httpJsonPairHead(jsonBuf, "datapoints", 10); + + // data array begin + httpJsonToken(jsonBuf, JsonArrStt); +} + +void gcWriteTargetEndJson(JsonBuf *jsonBuf) { + // data array end + httpJsonToken(jsonBuf, JsonArrEnd); + + // object end + httpJsonToken(jsonBuf, JsonObjEnd); +} + +void gcStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // write end of target + gcWriteTargetEndJson(jsonBuf); +} + +bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return false; + + int num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + if (num_fields == 0) { + return false; + } + + // such as select count(*) from sys.cpu + // such as select count(*) from sys.cpu group by ipaddr + // such as select count(*) from sys.cpu interval(1d) + // such as select count(*) from sys.cpu interval(1d) group by ipaddr + // such as select count(*) count(*) from sys.cpu group by ipaddr interval(1d) + int dataFields = -1; + int groupFields = -1; + bool hasTimestamp = fields[0].type == TSDB_DATA_TYPE_TIMESTAMP; + if (hasTimestamp) { + dataFields = 1; + if (num_fields > 2) groupFields = num_fields - 1; + } else { + dataFields = 0; + if (num_fields > 1) groupFields = num_fields - 1; + } + + char *refIdBuffer = httpGetCmdsString(pContext, cmd->values); + char *aliasBuffer = httpGetCmdsString(pContext, cmd->table); + char *targetBuffer = httpGetCmdsString(pContext, cmd->timestamp); + + if (groupFields == -1 && cmd->numOfRows == 0) { + gcWriteTargetStartJson(jsonBuf, refIdBuffer, aliasBuffer); + cmd->numOfRows += numOfRows; + } + + for (int i = 0; i < numOfRows; ++i) { + TAOS_ROW row = taos_fetch_row(result); + + // for group by + if (groupFields != -1) { + char target[HTTP_GC_TARGET_SIZE]; + + switch (fields[groupFields].type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%d", aliasBuffer, *((int8_t *)row[groupFields])); + break; + case TSDB_DATA_TYPE_SMALLINT: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%d", aliasBuffer, *((int16_t *)row[groupFields])); + break; + case TSDB_DATA_TYPE_INT: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%d", aliasBuffer, *((int32_t *)row[groupFields])); + break; + case TSDB_DATA_TYPE_BIGINT: + snprintf(target, 
HTTP_GC_TARGET_SIZE, "%s%ld", aliasBuffer, *((int64_t *)row[groupFields])); + break; + case TSDB_DATA_TYPE_FLOAT: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%.5f", aliasBuffer, *((float *)row[groupFields])); + break; + case TSDB_DATA_TYPE_DOUBLE: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%.9f", aliasBuffer, *((double *)row[groupFields])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%s", aliasBuffer, (char *)row[groupFields]); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%ld", aliasBuffer, *((int64_t *)row[groupFields])); + break; + default: + snprintf(target, HTTP_GC_TARGET_SIZE, "%s%s", aliasBuffer, "invalidcol"); + break; + } + + if (strcmp(target, targetBuffer) != 0) { + // first target not write this section + if (strlen(targetBuffer) != 0) { + gcWriteTargetEndJson(jsonBuf); + } + + // start new target + gcWriteTargetStartJson(jsonBuf, refIdBuffer, target); + strncpy(targetBuffer, target, HTTP_GC_TARGET_SIZE); + } + } // end of group by + + // data row array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + for (int i = dataFields; i >= 0; i--) { + httpJsonItemToken(jsonBuf); + if (row[i] == NULL) { + httpJsonString(jsonBuf, "NULL", 4); + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + httpJsonInt(jsonBuf, *((int8_t *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + httpJsonInt(jsonBuf, *((int16_t *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + httpJsonInt(jsonBuf, *((int32_t *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + httpJsonInt64(jsonBuf, *((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + httpJsonFloat(jsonBuf, *((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + httpJsonDouble(jsonBuf, *((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + httpJsonStringForTransMean(jsonBuf, row[i], fields[i].bytes); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + httpJsonInt64(jsonBuf, *((int64_t *)row[i])); + break; + default: + httpJsonString(jsonBuf, "invalidcol", 10); + break; + } + } + + if (dataFields == 0) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, "-", 1); + } + + // data row array end + httpJsonToken(jsonBuf, JsonArrEnd); + } + + return true; +} + +void gcSendHeartBeatResp(HttpContext *pContext, HttpSqlCmd *cmd) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + char *desc = "Grafana server receive a quest from you!"; + + httpInitJsonBuf(jsonBuf, pContext); + + httpJsonToken(jsonBuf, JsonObjStt); + httpJsonPair(jsonBuf, "message", (int)strlen("message"), desc, (int)strlen(desc)); + httpJsonToken(jsonBuf, JsonObjEnd); + + char head[1024]; + + int hLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_GRAFANA], httpVersionStr[pContext->httpVersion], + httpKeepAliveStr[pContext->httpKeepAlive], (jsonBuf->lst - jsonBuf->buf)); + httpWriteBuf(pContext, head, hLen); + httpWriteBuf(pContext, jsonBuf->buf, (int)(jsonBuf->lst - jsonBuf->buf)); +} diff --git a/src/modules/http/src/httpAuth.c b/src/modules/http/src/httpAuth.c new file mode 100644 index 000000000000..cd00bded45c9 --- /dev/null +++ b/src/modules/http/src/httpAuth.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include "tutil.h" + +#include "http.h" +#include "httpHandle.h" +#include "tkey.h" + +bool httpParseBasicAuthToken(HttpContext *pContext, char *token, int len) { + token[len] = '\0'; + int outlen = 0; + char *base64 = (char *)base64_decode(token, len, &outlen); + if (base64 == NULL || outlen == 0) { + httpError("context:%p, fd:%d, ip:%s, basic token:%s parsed error", pContext, pContext->fd, pContext->ipstr, token); + return false; + } + + char *user = strstr(base64, ":"); + if (user == NULL) { + httpError("context:%p, fd:%d, ip:%s, basic token:%s invalid format", pContext, pContext->fd, pContext->ipstr, + token); + free(base64); + return false; + } + + int user_len = (int)(user - base64); + if (user_len < 1 || user_len >= TSDB_USER_LEN) { + httpError("context:%p, fd:%d, ip:%s, basic token:%s parse user error", pContext, pContext->fd, pContext->ipstr, + token); + free(base64); + return false; + } + strncpy(pContext->user, base64, (size_t)user_len); + + char *password = user + 1; + int pass_len = (int)((base64 + outlen) - password); + if (pass_len < 1 || pass_len >= TSDB_PASSWORD_LEN) { + httpError("context:%p, fd:%d, ip:%s, basic token:%s parse password error", pContext, pContext->fd, pContext->ipstr, + token); + free(base64); + return false; + } + strncpy(pContext->pass, password, (size_t)pass_len); + + free(base64); + httpTrace("context:%p, fd:%d, ip:%s, basic token parsed success, user:%s", pContext, pContext->fd, pContext->ipstr, + pContext->user); + return true; +} diff --git a/src/modules/http/src/httpCode.c b/src/modules/http/src/httpCode.c new file mode 100644 index 000000000000..e541872e2b8f --- /dev/null +++ b/src/modules/http/src/httpCode.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */
+
+char* httpMsg[] = {
+    "success",                                   // 0
+    "http server is not online",                 // 1
+    "http url is not supported",                 // 2
+    "http method parse error",                   // 3
+    "http version should be 1.0, 1.1 or 1.2",    // 4
+    "http head parse error",                     // 5
+    "request size is too big", "http body size invalid",
+    "http chunked body parse error",             // 8
+    "http url parse error",                      // 9
+    "invalid type of Authorization",
+    "no auth info input",
+    "no sql input",
+    "session list was full",
+    "not enough memory to alloc sqls",
+    "generate taosd token error",
+    "db and table can not be null",
+    "no need to execute use db cmd",
+    "parse grafana json error",
+    "size of multi request is 0",                // 19
+    "request is empty",                          // 20
+    "not enough connections for http",           // 21
+
+    // telegraf
+    "database name can not be null",             // 22
+    "database name too long",
+    "invalid telegraf json format",
+    "metrics size is 0",
+    "metrics size can not be more than 20K",     // 26
+    "metric name not found",
+    "metric name type should be string",
+    "metric name length is 0",
+    "metric name length too long",
+    "timestamp not found",                       // 31
+    "timestamp type should be integer",
+    "timestamp value smaller than 0",
+    "tags not found",
+    "tags size is 0",
+    "tags size too long",                        // 36
+    "tag is null", "tag name is null",
+    "tag name length too long",                  // 39
+    "tag value type should be number or string",
+    "tag value is null",
+    "table is null",                             // 42
+    "table name length too long",
+    "fields not found",                          // 44
+    "fields size is 0",
+    "fields size too long",
+    "field is null",                             // 47
+    "field name is null",
+    "field name length too long",                // 49
+    "field value type should be number or string",
+    "field value is null",                       // 51
+    "parse basic auth token error",
+    "parse taosd auth token error",
+    "host type should be string",
+
+    // grafana
+    "query size is 0",                           // 55
+    "query size can not be more than 100",
+
+    // opentsdb
+    "database name can not be null",             // 57
+    "database name too long",
+    "invalid opentsdb json format",              // 59
+    "metrics size is 0",
+    "metrics size can not be more than 10K",     // 61
+    "metric name not found",
+    "metric name type should be string",
+    "metric name length is 0",
+    "metric name length can not be more than 22",
+    "timestamp not found",
+    "timestamp type should be integer",
+    "timestamp value smaller than 0",
+    "tags not found",
+    "tags size is 0",
+    "tags size too long",                        // 71
+    "tag is null",
+    "tag name is null",
+    "tag name length too long",                  // 74
+    "tag value type should be boolean, number or string",
+    "tag value is null",
+    "tag value can not be more than 64",         // 77
+    "value not found",
+    "value type should be boolean, number or string",
+    "stable does not exist",
+
+};
diff --git a/src/modules/http/src/httpHandle.c b/src/modules/http/src/httpHandle.c
new file mode 100644
index 000000000000..04cc27fe64ed
--- /dev/null
+++ b/src/modules/http/src/httpHandle.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */ + +#include +#include +#include +#include + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" +#include "shash.h" +#include "taos.h" +#include "tglobalcfg.h" +#include "tsocket.h" +#include "ttimer.h" + +void httpToLowerUrl(char* url) { + /*ignore case */ + while (*url) { + if (*url >= 'A' && *url <= 'Z') { + *url = *url | 0x20; + } + url++; + } +} + +bool httpUrlMatch(HttpContext* pContext, int pos, char* cmp) { + HttpParser* pParser = &pContext->pThread->parser; + + if (pos < 0 || pos >= HTTP_MAX_URL) { + return false; + } + + if (pParser->path[pos].len <= 0) { + return false; + } + + if (strcmp(pParser->path[pos].pos, cmp) != 0) { + return false; + } + + return true; +} + +// /account/db/meter HTTP/1.1\r\nHost +bool httpParseURL(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + + char* pSeek; + char* pEnd = strchr(pParser->pLast, ' '); + if (*pParser->pLast != '/') { + return false; + } + pParser->pLast++; + + for (int i = 0; i < HTTP_MAX_URL; i++) { + pSeek = strchr(pParser->pLast, '/'); + if (pSeek == NULL) { + break; + } + pParser->path[i].pos = pParser->pLast; + if (pSeek <= pEnd) { + pParser->path[i].len = (int16_t)(pSeek - pParser->pLast); + pParser->path[i].pos[pParser->path[i].len] = 0; + httpToLowerUrl(pParser->path[i].pos); + pParser->pLast = pSeek + 1; + } else { + pParser->path[i].len = (int16_t)(pEnd - pParser->pLast); + pParser->path[i].pos[pParser->path[i].len] = 0; + httpToLowerUrl(pParser->path[i].pos); + pParser->pLast = pEnd + 1; + break; + } + } + pParser->pLast = pEnd + 1; + + // for (int i = 0; i < HTTP_MAX_URL; i++) { + // if (pParser->path[i].len > 0) { + // httpTrace("url_pos: %d, path: [%s]", i, pParser->path[i].pos); + // } + //} + + if (pParser->path[0].len == 0) { + httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL); + return false; + } + + return true; +} + +bool httpParseHttpVersion(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + + char* pEnd = strchr(pParser->pLast, '1'); + + if (*(pEnd + 1) != '.') { + httpError("context:%p, fd:%d, ip:%s, can't find http version at position:%s", pContext, pContext->fd, + pContext->ipstr, pParser->pLast); + httpSendErrorResp(pContext, HTTP_PARSE_HTTP_VERSION_ERROR); + return false; + } + + if (*(pEnd + 2) == '0') + pContext->httpVersion = HTTP_VERSION_10; + else if (*(pEnd + 2) == '1') + pContext->httpVersion = HTTP_VERSION_11; + else if (*(pEnd + 2) == '2') + pContext->httpVersion = HTTP_VERSION_11; + else + pContext->httpVersion = HTTP_VERSION_10; + + httpTrace("context:%p, fd:%d, ip:%s, httpVersion:1.%d", pContext, pContext->fd, pContext->ipstr, + pContext->httpVersion); + return true; +} + +bool httpGetNextLine(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + while (pParser->buffer + pParser->bufsize - pParser->pCur++ > 0) { + if (*(pParser->pCur) == '\n' && *(pParser->pCur - 1) == '\r') { + // cut the string + *pParser->pCur = 0; + return true; + } + } + + httpSendErrorResp(pContext, HTTP_PARSE_HEAD_ERROR); + + return false; +} + +bool httpGetHttpMethod(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + char* pSeek = strchr(pParser->pLast, ' '); + if (pSeek == NULL) { + httpSendErrorResp(pContext, HTTP_PARSE_HTTP_METHOD_ERROR); + return false; + } + pParser->method.pos = pParser->pLast; + pParser->method.len = (int16_t)(pSeek - pParser->pLast); + pParser->method.pos[pParser->method.len] = 0; + pParser->pLast = pSeek + 1; + + httpTrace("context:%p, fd:%d, 
ip:%s, httpMethod:%s", pContext, pContext->fd, pContext->ipstr, pParser->method.pos); + return true; +} + +bool httpGetDecodeMethod(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + HttpServer* pServer = pContext->pThread->pServer; + int methodLen = pServer->methodScannerLen; + for (int i = 0; i < methodLen; i++) { + HttpDecodeMethod* method = pServer->methodScanner[i]; + if (strcmp(method->module, pParser->path[0].pos) != 0) { + continue; + } + pParser->pMethod = method; + return true; + } + + httpError("context:%p, fd:%d, ip:%s, error:the url is not support, method:%s, path:%s", + pContext, pContext->fd, pContext->ipstr, pParser->method.pos, pParser->path[0].pos); + httpSendErrorResp(pContext, HTTP_UNSUPPORT_URL); + + return false; +} + +bool httpParseHead(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + if (strncasecmp(pParser->pLast, "Content-Length: ", 16) == 0) { + pParser->data.len = (int32_t)atoi(pParser->pLast + 16); + httpTrace("context:%p, fd:%d, ip:%s, Content-Length:%d", pContext, pContext->fd, pContext->ipstr, + pParser->data.len); + } else if (tsHttpEnableCompress && strncasecmp(pParser->pLast, "Accept-Encoding: ", 17) == 0) { + if (strstr(pParser->pLast + 17, "deflate") != NULL) { + pContext->compress = JsonCompress; + } + httpTrace("context:%p, fd:%d, ip:%s, Accept-Encoding:%s", pContext, pContext->fd, pContext->ipstr, + pContext->compress == JsonCompress ? "deflate" : "identity"); + } else if (strncasecmp(pParser->pLast, "Connection: ", 12) == 0) { + if (strncasecmp(pParser->pLast + 12, "Keep-Alive", 10) == 0) { + pContext->httpKeepAlive = HTTP_KEEPALIVE_ENABLE; + } else { + pContext->httpKeepAlive = HTTP_KEEPALIVE_DISABLE; + } + httpTrace("context:%p, fd:%d, ip:%s, keepAlive:%d", pContext, pContext->fd, pContext->ipstr, + pContext->httpKeepAlive); + } else if (strncasecmp(pParser->pLast, "Transfer-Encoding: ", 19) == 0) { + if (strncasecmp(pParser->pLast + 19, "chunked", 7) == 0) { + pContext->httpChunked = HTTP_CHUNKED; + } + } else if (strncasecmp(pParser->pLast, "Authorization: ", 15) == 0) { + if (strncasecmp(pParser->pLast + 15, "Basic ", 6) == 0) { + pParser->token.pos = pParser->pLast + 21; + pParser->token.len = (int16_t)(pParser->pCur - pParser->token.pos - 1); + bool parsed = httpParseBasicAuthToken(pContext, pParser->token.pos, pParser->token.len); + if (!parsed) { + httpSendErrorResp(pContext, HTTP_INVALID_BASIC_AUTH_TOKEN); + return false; + } + } else if (strncasecmp(pParser->pLast + 15, "Taosd ", 6) == 0) { + httpSendErrorResp(pContext, HTTP_INVALID_TAOSD_AUTH_TOKEN); + } else { + httpSendErrorResp(pContext, HTTP_INVALID_AUTH_TOKEN); + return false; + } + } else { + } + + return true; +} + +bool httpParseChunkedBody(HttpContext* pContext, HttpParser* pParser, bool test) { + char* pEnd = pParser->buffer + pParser->bufsize; + char* pRet = pParser->data.pos; + char* pSize = pParser->data.pos; + size_t size = strtoul(pSize, NULL, 16); + if (size <= 0) return false; + + while (size > 0) { + char* pData = strstr(pSize, "\r\n"); + if (pData == NULL || pData >= pEnd) return false; + pData += 2; + + pSize = strstr(pData, "\r\n"); + if (pSize == NULL || pSize >= pEnd) return false; + if ((size_t)(pSize - pData) != size) return false; + pSize += 2; + + if (!test) { + memmove(pRet, pData, size); + pRet += size; + } + + size = strtoul(pSize, NULL, 16); + } + + if (!test) { + *pRet = '\0'; + } + + return true; +} + +bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { + for (int tryTimes = 0; 
tryTimes < 100; ++tryTimes) { + bool parsedOk = httpParseChunkedBody(pContext, pParser, true); + if (parsedOk) { + // httpTrace("context:%p, fd:%d, ip:%s, chunked body read finished", + // pContext, pContext->fd, pContext->ipstr); + httpParseChunkedBody(pContext, pParser, false); + return true; + } else { + httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, + pContext->ipstr); + if (!httpReadDataImp(pContext)) { + httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); + return false; + } else { + taosMsleep(1); + } + } + } + + httpError("context:%p, fd:%d, ip:%s, chunked body parsed error", pContext, pContext->fd, pContext->ipstr); + httpSendErrorResp(pContext, HTTP_PARSE_CHUNKED_BODY_ERROR); + + return false; +} + +bool httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) { + for (int tryTimes = 0; tryTimes < 100; ++tryTimes) { + int dataReadLen = pParser->bufsize - (int)(pParser->data.pos - pParser->buffer); + if (dataReadLen > pParser->data.len) { + httpError("context:%p, fd:%d, ip:%s, un-chunked body length invalid, dataReadLen:%d > pContext->data.len:%d", + pContext, pContext->fd, pContext->ipstr, dataReadLen, pParser->data.len); + httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR); + return false; + } else if (dataReadLen < pParser->data.len) { + httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, dataReadLen:%d < pContext->data.len:%d, continue read", + pContext, pContext->fd, pContext->ipstr, dataReadLen, pParser->data.len); + if (!httpReadDataImp(pContext)) { + httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); + return false; + } else { + taosMsleep(1); + } + } else { + return true; + } + } + + int dataReadLen = pParser->bufsize - (int)(pParser->data.pos - pParser->buffer); + if (dataReadLen != pParser->data.len) { + httpError("context:%p, fd:%d, ip:%s, un-chunked body length error, dataReadLen:%d != pContext->data.len:%d", + pContext, pContext->fd, pContext->ipstr, dataReadLen, pParser->data.len); + httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR); + return false; + } + + httpTrace("context:%p, fd:%d, ip:%s, un-chunked body read over, dataReadLen:%d == pContext->data.len:%d", + pContext, pContext->fd, pContext->ipstr, dataReadLen, pParser->data.len); + return true; +} + +bool httpParseRequest(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + + if (!httpGetHttpMethod(pContext)) { + return false; + } + + if (!httpParseURL(pContext)) { + return false; + } + + if (!httpParseHttpVersion(pContext)) { + return false; + } + + if (!httpGetDecodeMethod(pContext)) { + return false; + } + + do { + if (!httpGetNextLine(pContext)) { + return false; + } + + // Empty line, end of the HTTP HEAD + if (pParser->pCur - pParser->pLast == 1) { + pParser->data.pos = ++pParser->pCur; + break; + } + + if (!httpParseHead(pContext)) { + return false; + } + + pParser->pLast = ++pParser->pCur; + } while (1); + + if (pContext->httpChunked == HTTP_UNCUNKED) { + if (!httpReadUnChunkedBody(pContext, pParser)) { + return false; + } + } else { + if (!httpReadChunkedBody(pContext, pParser)) { + return false; + } + } + + httpTrace("context:%p, fd:%d, ip:%s, parse http request ok", pContext, pContext->fd, pContext->ipstr); + return true; +} + +bool httpDecodeRequest(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + if (pParser->pMethod->decodeFp == NULL) { + return 
false; + } + + return (*pParser->pMethod->decodeFp)(pContext); +} + +/** + * Process the request from http pServer + */ +bool httpProcessData(HttpContext* pContext) { + httpInitContext(pContext); + + if (!httpParseRequest(pContext)) { + httpCloseContextByApp(pContext); + return HTTP_PROCESS_ERROR; + } + + // handle Cross-domain request + if (strcmp(pContext->pThread->parser.method.pos, "OPTIONS") == 0) { + httpTrace("context:%p, fd:%d, ip:%s, process options request", pContext, pContext->fd, pContext->ipstr); + httpSendOptionResp(pContext, "process options request success"); + return HTTP_PROCESS_SUCCESS; + } + + if (!httpDecodeRequest(pContext)) { + httpCloseContextByApp(pContext); + return HTTP_PROCESS_SUCCESS; + } + + httpProcessRequest(pContext); + return HTTP_PROCESS_SUCCESS; +} diff --git a/src/modules/http/src/httpJson.c b/src/modules/http/src/httpJson.c new file mode 100644 index 000000000000..cb036d71c9ba --- /dev/null +++ b/src/modules/http/src/httpJson.c @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include "http.h" +#include "httpCode.h" +#include "httpJson.h" +#include "httpResp.h" +#include "taosmsg.h" + +#define MAX_NUM_STR_SZ 25 + +char JsonItmTkn = ','; +char JsonObjStt = '{'; +char JsonObjEnd = '}'; +char JsonArrStt = '['; +char JsonArrEnd = ']'; +char JsonStrStt = '\"'; +char JsonStrEnd = '\"'; +char JsonPairTkn = ':'; +char JsonNulTkn[] = "null"; +char JsonTrueTkn[] = "true"; +char JsonFalseTkn[] = "false"; + +int httpWriteBufByFd(int fd, const char* buf, int sz) { + const int countTimes = 3; + const int waitTime = 5; // 5ms + int len; + int countWait = 0; + + do { + if (fd > 2) + len = (int)send(fd, buf, (size_t)sz, MSG_NOSIGNAL); + else + len = sz; + if (len < 0) { + break; + } else if (len == 0) { + // wait & count + if (++countWait > countTimes) return -1; + sleep((uint32_t)waitTime); + } else { + countWait = 0; + } + buf += len; + } while (len < (sz -= len)); + + return sz; +} + +int httpWriteBuf(struct HttpContext* pContext, const char* buf, int sz) { + int writeSz = httpWriteBufByFd(pContext->fd, buf, sz); + + if (writeSz == -1) { + httpError("context:%p, fd:%d, ip:%s, size:%d, response failed:\n%s", pContext, pContext->fd, pContext->ipstr, sz, + buf); + } else { + httpTrace("context:%p, fd:%d, ip:%s, size:%d, response:\n%s", pContext, pContext->fd, pContext->ipstr, sz, buf); + } + + return writeSz; +} + +int httpWriteJsonBufBody(JsonBuf* buf) { + int remain = 0; + if (buf->pContext->fd <= 0) { + httpTrace("context:%p, fd:%d, ip:%s, write json body error", buf->pContext, buf->pContext->fd, + buf->pContext->ipstr); + buf->pContext->fd = -1; + } + + if (buf->lst == buf->buf) { + httpTrace("context:%p, fd:%d, ip:%s, no data need dump", buf->pContext, buf->pContext->fd, buf->pContext->ipstr); + return 0; // there is no data to dump. 
+ } + + char sLen[24]; + uint64_t srcLen = (uint64_t)(buf->lst - buf->buf); + + /*HTTP servers often use compression to optimize transmission, for example + * with Content-Encoding: gzip or Content-Encoding: deflate. If both + * compression and chunked encoding are enabled, then the content stream is + * first compressed, then chunked; so the chunk encoding itself is not + * compressed, and the data in each chunk is not compressed individually. The + * remote endpoint then decodes the stream by concatenating the chunks and + * uncompressing the result.*/ + if (buf->pContext->compress == JsonUnCompress) { + int len = sprintf(sLen, "%lx\r\n", srcLen); + httpTrace("context:%p, fd:%d, ip:%s, write json body, chunk size:%lld", buf->pContext, buf->pContext->fd, + buf->pContext->ipstr, srcLen); + httpWriteBuf(buf->pContext, sLen, len); // dump chunk size + remain = httpWriteBuf(buf->pContext, buf->buf, (int)srcLen); + } else if (buf->pContext->compress == JsonCompress) { + // unsigned char compressBuf[JSON_BUFFER_SIZE] = { 0 }; + // uint64_t compressBufLen = sizeof(compressBuf); + // compress(compressBuf, &compressBufLen, (const unsigned char*)buf->buf, + // srcLen); + // int len = sprintf(sLen, "%lx\r\n", compressBufLen); + // + // httpTrace("context:%p, fd:%d, ip:%s, write json body, chunk size:%lld, + // compress:%ld", buf->pContext, buf->pContext->fd, buf->pContext->ipstr, + // srcLen, compressBufLen); + // httpWriteBuf(buf->pContext, sLen, len);//dump chunk size + // remain = httpWriteBuf(buf->pContext, (const char*)compressBuf, + // (int)compressBufLen); + } else { + } + + httpWriteBuf(buf->pContext, "\r\n", 2); + buf->total += (int)(buf->lst - buf->buf); + buf->lst = buf->buf; + memset(buf->buf, 0, (size_t)buf->size); + + return remain; // remain>0 is system error +} + +void httpWriteJsonBufHead(JsonBuf* buf) { + if (buf->pContext->fd <= 0) { + buf->pContext->fd = -1; + } + + char msg[1024] = {0}; + int len = -1; + + if (buf->pContext->compress == JsonUnCompress) { + len = sprintf(msg, httpRespTemplate[HTTP_RESPONSE_CHUNKED_UN_COMPRESS], httpVersionStr[buf->pContext->httpVersion], + httpKeepAliveStr[buf->pContext->httpKeepAlive]); + } else { + len = sprintf(msg, httpRespTemplate[HTTP_RESPONSE_CHUNKED_COMPRESS], httpVersionStr[buf->pContext->httpVersion], + httpKeepAliveStr[buf->pContext->httpKeepAlive]); + } + + httpWriteBuf(buf->pContext, (const char*)msg, len); +} + +void httpWriteJsonBufEnd(JsonBuf* buf) { + if (buf->pContext->fd <= 0) { + httpTrace("context:%p, fd:%d, ip:%s, json buf fd is 0", buf->pContext, buf->pContext->fd, buf->pContext->ipstr); + buf->pContext->fd = -1; + } + + httpWriteJsonBufBody(buf); + httpWriteBuf(buf->pContext, "0\r\n\r\n", 5); // end of chunked resp +} + +void httpInitJsonBuf(JsonBuf* buf, struct HttpContext* pContext) { + buf->lst = buf->buf; + buf->total = 0; + buf->size = JSON_BUFFER_SIZE; // option setting + buf->pContext = pContext; + memset(buf->lst, 0, JSON_BUFFER_SIZE); + + httpTrace("context:%p, fd:%d, ip:%s, json buffer initialized", buf->pContext, buf->pContext->fd, + buf->pContext->ipstr); +} + +void httpJsonItemToken(JsonBuf* buf) { + char c = *(buf->lst - 1); + if (c == JsonArrStt || c == JsonObjStt || c == JsonPairTkn || c == JsonItmTkn) { + return; + } + if (buf->lst > buf->buf) httpJsonToken(buf, JsonItmTkn); +} + +void httpJsonString(JsonBuf* buf, char* sVal, int len) { + httpJsonItemToken(buf); + httpJsonToken(buf, JsonStrStt); + httpJsonPrint(buf, sVal, len); + httpJsonToken(buf, JsonStrEnd); +} + +void httpJsonOriginString(JsonBuf* buf, 
char* sVal, int len) { + httpJsonItemToken(buf); + httpJsonPrint(buf, sVal, len); +} + +void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int maxLen) { + httpJsonItemToken(buf); + httpJsonToken(buf, JsonStrStt); + + if (sVal != NULL) { + // dispose transferred meaning byte + char* lastPos = sVal; + char* curPos = sVal; + + for (int i = 0; i < maxLen; ++i) { + if (*curPos == 0) { + break; + } + + if (*curPos == '\"') { + httpJsonPrint(buf, lastPos, (int)(curPos - lastPos)); + curPos++; + lastPos = curPos; + httpJsonPrint(buf, "\\\"", 2); + } else if (*curPos == '\\') { + httpJsonPrint(buf, lastPos, (int)(curPos - lastPos)); + curPos++; + lastPos = curPos; + httpJsonPrint(buf, "\\\\", 2); + } else { + curPos++; + } + } + + if (*lastPos) { + httpJsonPrint(buf, lastPos, (int)(curPos - lastPos)); + } + } + + httpJsonToken(buf, JsonStrEnd); +} + +void httpJsonInt64(JsonBuf* buf, int64_t num) { + httpJsonItemToken(buf); + httpJsonTestBuf(buf, MAX_NUM_STR_SZ); + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%lld", num); +} + +void httpJsonTimestamp(JsonBuf* buf, int64_t t) { + char ts[30] = {0}; + + struct tm* ptm; + time_t tt = t / 1000; + ptm = localtime(&tt); + int length = (int)strftime(ts, 30, "%Y-%m-%d %H:%M:%S", ptm); + + snprintf(ts+length, MAX_NUM_STR_SZ, ".%03ld", t % 1000); + + httpJsonString(buf, ts, length + 4); +} + +void httpJsonInt(JsonBuf* buf, int num) { + httpJsonItemToken(buf); + httpJsonTestBuf(buf, MAX_NUM_STR_SZ); + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%d", num); +} + +void httpJsonFloat(JsonBuf* buf, float num) { + httpJsonItemToken(buf); + httpJsonTestBuf(buf, MAX_NUM_STR_SZ); + if (num > 1E10 || num < -1E10) { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.5e", num); + } else { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.5f", num); + } +} + +void httpJsonDouble(JsonBuf* buf, double num) { + httpJsonItemToken(buf); + httpJsonTestBuf(buf, MAX_NUM_STR_SZ); + if (num > 1E10 || num < -1E10) { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.9e", num); + } else { + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%.9f", num); + } +} + +void httpJsonNull(JsonBuf* buf) { httpJsonString(buf, "null", 4); } + +void httpJsonBool(JsonBuf* buf, int val) { + if (val == 0) + httpJsonPrint(buf, JsonFalseTkn, sizeof(JsonFalseTkn)); + else + httpJsonPrint(buf, JsonTrueTkn, sizeof(JsonTrueTkn)); +} + +void httpJsonPairHead(JsonBuf* buf, char* name, int len) { + httpJsonItemToken(buf); + httpJsonString(buf, name, len); + httpJsonToken(buf, JsonPairTkn); +} + +void httpJsonPair(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen) { + httpJsonPairHead(buf, name, nameLen); + httpJsonString(buf, sVal, valLen); +} + +void httpJsonPairOriginString(JsonBuf* buf, char* name, int nameLen, char* sVal, int valLen) { + httpJsonPairHead(buf, name, nameLen); + httpJsonOriginString(buf, sVal, valLen); +} + +void httpJsonPairIntVal(JsonBuf* buf, char* name, int nNameLen, int num) { + httpJsonPairHead(buf, name, nNameLen); + httpJsonInt(buf, num); +} + +void httpJsonPairInt64Val(JsonBuf* buf, char* name, int nNameLen, int64_t num) { + httpJsonPairHead(buf, name, nNameLen); + httpJsonInt64(buf, num); +} + +void httpJsonPairBoolVal(JsonBuf* buf, char* name, int nNameLen, int num) { + httpJsonPairHead(buf, name, nNameLen); + httpJsonBool(buf, num); +} + +void httpJsonPairFloatVal(JsonBuf* buf, char* name, int nNameLen, float num) { + httpJsonPairHead(buf, name, nNameLen); + httpJsonFloat(buf, num); +} + +void httpJsonPairDoubleVal(JsonBuf* buf, char* name, int 
nNameLen, double num) { + httpJsonPairHead(buf, name, nNameLen); + httpJsonDouble(buf, num); +} + +void httpJsonPairNullVal(JsonBuf* buf, char* name, int nNameLen) { + httpJsonPairHead(buf, name, nNameLen); + httpJsonNull(buf); +} + +void httpJsonPairArray(JsonBuf* buf, char* name, int len, httpJsonBuilder fnBuilder, void* dsHandle) { + httpJsonPairHead(buf, name, len); + httpJsonArray(buf, fnBuilder, dsHandle); +} + +void httpJsonPairObject(JsonBuf* buf, char* name, int len, httpJsonBuilder fnBuilder, void* dsHandle) { + httpJsonPairHead(buf, name, len); + httpJsonObject(buf, fnBuilder, dsHandle); +} + +void httpJsonObject(JsonBuf* buf, httpJsonBuilder fnBuilder, void* dsHandle) { + httpJsonItemToken(buf); + httpJsonToken(buf, JsonObjStt); + (*fnBuilder)(buf, dsHandle); + httpJsonToken(buf, JsonObjEnd); +} + +void httpJsonArray(JsonBuf* buf, httpJsonBuilder fnBuilder, void* jsonHandle) { + httpJsonItemToken(buf); + httpJsonToken(buf, JsonArrStt); + (*fnBuilder)(buf, jsonHandle); + httpJsonToken(buf, JsonArrEnd); +} + +void httpJsonTestBuf(JsonBuf* buf, int safety) { + if ((buf->lst - buf->buf + safety) < buf->size) return; + // buf->slot = *buf->lst; + httpWriteJsonBufBody(buf); +} + +void httpJsonToken(JsonBuf* buf, char c) { + httpJsonTestBuf(buf, MAX_NUM_STR_SZ); // maybe object stack + *buf->lst++ = c; +} + +void httpJsonPrint(JsonBuf* buf, const char* json, int len) { + if (len == 0 || len >= JSON_BUFFER_SIZE) { + return; + } + + if (len > buf->size) { + httpWriteJsonBufBody(buf); + httpJsonPrint(buf, json, len); + // buf->slot = json[len - 1]; + return; + } + httpJsonTestBuf(buf, len + 2); + memcpy(buf->lst, json, (size_t)len); + buf->lst += len; +} + +void httpJsonPairStatus(JsonBuf* buf, int code) { + if (code == 0) { + httpJsonPair(buf, "status", 6, "succ", 4); + } else { + httpJsonPair(buf, "status", 6, "error", 5); + httpJsonItemToken(buf); + httpJsonPairIntVal(buf, "code", 4, code); + if (code >= 0) { + httpJsonItemToken(buf); + if (code == TSDB_CODE_DB_NOT_SELECTED) { + httpJsonPair(buf, "desc", 4, "failed to create database", 23); + } else if (code == TSDB_CODE_INVALID_TABLE) { + httpJsonPair(buf, "desc", 4, "failed to create table", 22); + } else + httpJsonPair(buf, "desc", 4, tsError[code], (int)strlen(tsError[code])); + } + } +} \ No newline at end of file diff --git a/src/modules/http/src/httpResp.c b/src/modules/http/src/httpResp.c new file mode 100644 index 000000000000..b5387b7cdba0 --- /dev/null +++ b/src/modules/http/src/httpResp.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "httpResp.h" +#include +#include +#include "httpCode.h" +#include "httpJson.h" + +extern char *tsError[]; + +const char *httpKeepAliveStr[] = {"", "Connection: Keep-Alive\r\n", "Connection: Close\r\n"}; + +const char *httpVersionStr[] = {"HTTP/1.0", "HTTP/1.1", "HTTP/1.2"}; + +const char *httpRespTemplate[] = { + // HTTP_RESPONSE_JSON_OK + // HTTP_RESPONSE_JSON_ERROR + "{\"status\":\"succ\",\"code\":%d,\"desc\":\"%s\"}", + "{\"status\":\"error\",\"code\":%d,\"desc\":\"%s\"}", + // HTTP_RESPONSE_OK + // HTTP_RESPONSE_ERROR + "%s 200 OK\r\nAccess-Control-Allow-Origin:*\r\n%sContent-Type: application/json;charset=utf-8\r\nContent-Length: %d\r\n\r\n", + "%s %d %s\r\nAccess-Control-Allow-Origin:*\r\n%sContent-Type: application/json;charset=utf-8\r\nContent-Length: %d\r\n\r\n", + // HTTP_RESPONSE_CHUNKED_UN_COMPRESS, HTTP_RESPONSE_CHUNKED_COMPRESS + "%s 200 OK\r\nAccess-Control-Allow-Origin:*\r\n%sContent-Type: application/json;charset=utf-8\r\nTransfer-Encoding: chunked\r\n\r\n", + "%s 200 OK\r\nAccess-Control-Allow-Origin:*\r\n%sContent-Type: application/json;charset=utf-8\r\nContent-Encoding: deflate\r\nTransfer-Encoding: chunked\r\n\r\n", + // HTTP_RESPONSE_OPTIONS + "%s 200 OK\r\nAccess-Control-Allow-Origin:*\r\n%sContent-Type: application/json;charset=utf-8\r\nContent-Length: %d\r\nAccess-Control-Allow-Methods: *\r\nAccess-Control-Max-Age: 3600\r\nAccess-Control-Allow-Headers: Origin, X-Requested-With, Content-Type, Accept, authorization\r\n\r\n", + // HTTP_RESPONSE_GRAFANA + "%s 200 OK\r\nAccess-Control-Allow-Origin:*\r\n%sAccess-Control-Allow-Methods:POST, GET, OPTIONS, DELETE, PUT\r\nAccess-Control-Allow-Headers:Accept, Content-Type\r\nContent-Type: application/json;charset=utf-8\r\nContent-Length: %d\r\n\r\n" +}; + +void httpSendErrResp(HttpContext *pContext, int httpCode, char *httpCodeStr, int errNo, char *desc) { + httpError("context:%p, fd:%d, ip:%s, code:%d, error:%d:%s", pContext, pContext->fd, pContext->ipstr, httpCode, errNo, + desc); + + char head[512] = {0}; + char body[512] = {0}; + + int bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_ERROR], errNo, desc); + int headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_ERROR], httpVersionStr[pContext->httpVersion], httpCode, + httpCodeStr, httpKeepAliveStr[pContext->httpKeepAlive], bodyLen); + + httpWriteBuf(pContext, head, headLen); + httpWriteBuf(pContext, body, bodyLen); + httpCloseContextByApp(pContext); +} + +void httpSendErrorRespWithDesc(HttpContext *pContext, int errNo, char *desc) { + int httpCode = 500; + char *httpCodeStr = "Internal Server Error"; + switch (errNo) { + case HTTP_SUCCESS: + httpCode = 200; + httpCodeStr = "OK"; + break; + case HTTP_SERVER_OFFLINE: + case HTTP_UNSUPPORT_URL: + httpCode = 404; + httpCodeStr = "Not Found"; + break; + case HTTP_PARSE_HTTP_METHOD_ERROR: + httpCode = 405; + httpCodeStr = "Method Not Allowed"; + break; + case HTTP_PARSE_HTTP_VERSION_ERROR: + httpCode = 505; + httpCodeStr = "HTTP Version Not Supported"; + break; + case HTTP_PARSE_HEAD_ERROR: + httpCode = 406; + httpCodeStr = "Not Acceptable"; + break; + case HTTP_REQUSET_TOO_BIG: + httpCode = 413; + httpCodeStr = "Request Entity Too Large"; + break; + case HTTP_PARSE_BODY_ERROR: + case HTTP_PARSE_CHUNKED_BODY_ERROR: + httpCode = 409; + httpCodeStr = "Conflict"; + break; + case HTTP_PARSE_URL_ERROR: + httpCode = 414; + httpCodeStr = "Request-URI Invalid"; + break; + case HTTP_INVALID_AUTH_TOKEN: + case HTTP_PARSE_USR_ERROR: + httpCode = 401; + httpCodeStr = "Unauthorized"; + break; + case 
HTTP_NO_SQL_INPUT: + httpCode = 400; + httpCodeStr = "Bad Request"; + break; + case HTTP_SESSION_FULL: + httpCode = 421; + httpCodeStr = "Too many connections"; + break; + case HTTP_NO_ENOUGH_MEMORY: + case HTTP_GEN_TAOSD_TOKEN_ERR: + httpCode = 507; + httpCodeStr = "Insufficient Storage"; + break; + case HTTP_INVALID_DB_TABLE: + case HTTP_NO_EXEC_USEDB: + case HTTP_PARSE_GC_REQ_ERROR: + case HTTP_INVALID_MULTI_REQUEST: + case HTTP_NO_MSG_INPUT: + httpCode = 400; + httpCodeStr = "Bad Request"; + break; + case HTTP_NO_ENOUGH_SESSIONS: + httpCode = 421; + httpCodeStr = "Too many connections"; + break; + // telegraf + case HTTP_TG_DB_NOT_INPUT: + case HTTP_TG_DB_TOO_LONG: + case HTTP_TG_INVALID_JSON: + case HTTP_TG_METRICS_NULL: + case HTTP_TG_METRICS_SIZE: + case HTTP_TG_METRIC_NULL: + case HTTP_TG_METRIC_TYPE: + case HTTP_TG_METRIC_NAME_NULL: + case HTTP_TG_METRIC_NAME_LONG: + case HTTP_TG_TIMESTAMP_NULL: + case HTTP_TG_TIMESTAMP_TYPE: + case HTTP_TG_TIMESTAMP_VAL_NULL: + case HTTP_TG_TAGS_NULL: + case HTTP_TG_TAGS_SIZE_0: + case HTTP_TG_TAGS_SIZE_LONG: + case HTTP_TG_TAG_NULL: + case HTTP_TG_TAG_NAME_NULL: + case HTTP_TG_TAG_NAME_SIZE: + case HTTP_TG_TAG_VALUE_TYPE: + case HTTP_TG_TAG_VALUE_NULL: + case HTTP_TG_TABLE_NULL: + case HTTP_TG_TABLE_SIZE: + case HTTP_TG_FIELDS_NULL: + case HTTP_TG_FIELDS_SIZE_0: + case HTTP_TG_FIELDS_SIZE_LONG: + case HTTP_TG_FIELD_NULL: + case HTTP_TG_FIELD_NAME_NULL: + case HTTP_TG_FIELD_NAME_SIZE: + case HTTP_TG_FIELD_VALUE_TYPE: + case HTTP_TG_FIELD_VALUE_NULL: + case HTTP_INVALID_BASIC_AUTH_TOKEN: + case HTTP_INVALID_TAOSD_AUTH_TOKEN: + case HTTP_TG_HOST_NOT_STRING: + // grafana + case HTTP_GC_QUERY_NULL: + case HTTP_GC_QUERY_SIZE: + httpCode = 400; + httpCodeStr = "Bad Request"; + break; + default: + httpError("context:%p, fd:%d, ip:%s, error:%d not recognized", pContext, pContext->fd, pContext->ipstr, errNo); + break; + } + + if (desc == NULL) { + httpSendErrResp(pContext, httpCode, httpCodeStr, errNo + 1000, httpMsg[errNo]); + } else { + httpSendErrResp(pContext, httpCode, httpCodeStr, errNo + 1000, desc); + } +} + +void httpSendErrorResp(HttpContext *pContext, int errNo) { httpSendErrorRespWithDesc(pContext, errNo, NULL); } + +void httpSendTaosdErrorResp(HttpContext *pContext, int errCode) { + int httpCode = 400; + httpSendErrResp(pContext, httpCode, "Bad Request", errCode, tsError[errCode]); +} + +void httpSendSuccResp(HttpContext *pContext, char *desc) { + char head[1024] = {0}; + char body[1024] = {0}; + + int bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], HTTP_SUCCESS, desc); + int headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OK], httpVersionStr[pContext->httpVersion], + httpKeepAliveStr[pContext->httpKeepAlive], bodyLen); + + httpWriteBuf(pContext, head, headLen); + httpWriteBuf(pContext, body, bodyLen); + httpCloseContextByApp(pContext); +} + +void httpSendOptionResp(HttpContext *pContext, char *desc) { + char head[1024] = {0}; + char body[1024] = {0}; + + int bodyLen = sprintf(body, httpRespTemplate[HTTP_RESPONSE_JSON_OK], HTTP_SUCCESS, desc); + int headLen = sprintf(head, httpRespTemplate[HTTP_RESPONSE_OPTIONS], httpVersionStr[pContext->httpVersion], + httpKeepAliveStr[pContext->httpKeepAlive], bodyLen); + + httpWriteBuf(pContext, head, headLen); + httpWriteBuf(pContext, body, bodyLen); + httpCloseContextByApp(pContext); +} diff --git a/src/modules/http/src/httpServer.c b/src/modules/http/src/httpServer.c new file mode 100644 index 000000000000..12d41740128b --- /dev/null +++ b/src/modules/http/src/httpServer.c @@ 
-0,0 +1,544 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tlog.h" +#include "tlog.h" +#include "tsocket.h" +#include "tutil.h" + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" + +void httpFreeContext(HttpServer *pServer, HttpContext *pContext); + +/** + * context will be reused while connection exist + * multiCmds and jsonBuf will be malloc after taos_query_a called + * and won't be freed until connection closed + */ +HttpContext *httpCreateContext(HttpServer *pServer) { + HttpContext *pContext = (HttpContext *)taosMemPoolMalloc(pServer->pContextPool); + if (pContext != NULL) { + pContext->fromMemPool = 1; + httpTrace("context:%p, is malloced from mempool", pContext); + } else { + pContext = (HttpContext *)malloc(sizeof(HttpContext)); + if (pContext == NULL) { + return NULL; + } else { + memset(pContext, 0, sizeof(HttpContext)); + } + httpTrace("context:%p, is malloced from raw memory", pContext); + } + + pContext->signature = pContext; + pContext->httpVersion = HTTP_VERSION_10; + if (pthread_mutex_init(&(pContext->mutex), NULL) < 0) { + httpFreeContext(pServer, pContext); + return NULL; + } + + return pContext; +} + +void httpFreeContext(HttpServer *pServer, HttpContext *pContext) { + pthread_mutex_unlock(&pContext->mutex); + pthread_mutex_destroy(&pContext->mutex); + + if (pContext->fromMemPool) { + httpTrace("context:%p, is freed from mempool", pContext); + taosMemPoolFree(pServer->pContextPool, (char *)pContext); + } else { + httpTrace("context:%p, is freed from raw memory", pContext); + tfree(pContext); + } +} + +void httpCleanUpContext(HttpThread *pThread, HttpContext *pContext) { + // for not keep-alive + if (pContext->fd >= 0) { + epoll_ctl(pThread->pollFd, EPOLL_CTL_DEL, pContext->fd, NULL); + taosCloseSocket(pContext->fd); + pContext->fd = -1; + } + + httpRestoreSession(pContext); + + pthread_mutex_lock(&pThread->threadMutex); + + pThread->numOfFds--; + httpTrace("context:%p, ip:%s, fd is cleaned up, thread:%s, numOfFds:%d", pContext, pContext->ipstr, pThread->label, + pThread->numOfFds); + if (pThread->numOfFds < 0) { + httpError("context:%p, ip:%s, thread:%s, number of FDs:%d shall never be negative", + pContext, pContext->ipstr, pThread->label, pThread->numOfFds); + pThread->numOfFds = 0; + } + + // remove from the link list + if (pContext->prev) { + (pContext->prev)->next = pContext->next; + } else { + pThread->pHead = pContext->next; + } + + if (pContext->next) { + (pContext->next)->prev = pContext->prev; + } + + pthread_mutex_unlock(&pThread->threadMutex); + + httpTrace("context:%p, ip:%s, thread:%s, numOfFds:%d, context is cleaned up", pContext, pContext->ipstr, + pThread->label, pThread->numOfFds); + + pContext->signature = 0; + pContext->fd = -1; + pContext->pThread = 0; + + // avoid 
double free + httpFreeJsonBuf(pContext); + httpFreeMultiCmds(pContext); + httpFreeContext(pThread->pServer, pContext); +} + +bool httpInitContext(HttpContext *pContext) { + pContext->accessTimes++; + pContext->httpVersion = HTTP_VERSION_10; + pContext->httpKeepAlive = HTTP_KEEPALIVE_NO_INPUT; + pContext->httpChunked = HTTP_UNCUNKED; + pContext->compress = JsonUnCompress; + pContext->usedByEpoll = 1; + pContext->usedByApp = 1; + pContext->reqType = HTTP_REQTYPE_OTHERS; + pContext->encodeMethod = NULL; + memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd)); + + httpTrace("context:%p, fd:%d, ip:%s, accessTimes:%d", pContext, pContext->fd, pContext->ipstr, pContext->accessTimes); + return true; +} + +void httpCloseContextByApp(HttpContext *pContext) { + HttpThread *pThread = pContext->pThread; + if (pContext->signature != pContext || pContext->pThread != pThread) { + return; + } + + pthread_mutex_lock(&pContext->mutex); + + httpTrace("context:%p, fd:%d, ip:%s, app use finished, usedByEpoll:%d, usedByApp:%d, httpVersion:1.%d, keepAlive:%d", + pContext, pContext->fd, pContext->ipstr, pContext->usedByEpoll, pContext->usedByApp, pContext->httpVersion, + pContext->httpKeepAlive); + + if (!pContext->usedByEpoll) { + httpCleanUpContext(pThread, pContext); + } else { + if (pContext->httpVersion == HTTP_VERSION_10 && pContext->httpKeepAlive != HTTP_KEEPALIVE_ENABLE) { + httpCleanUpContext(pThread, pContext); + } else if (pContext->httpVersion != HTTP_VERSION_10 && pContext->httpKeepAlive == HTTP_KEEPALIVE_DISABLE) { + httpCleanUpContext(pThread, pContext); + } else { + pContext->usedByApp = 0; + pthread_mutex_unlock(&pContext->mutex); + } + } +} + +void httpCloseContextByServer(HttpThread *pThread, HttpContext *pContext) { + if (pContext->signature != pContext || pContext->pThread != pThread) { + return; + } + pthread_mutex_lock(&pContext->mutex); + pContext->usedByEpoll = 0; + + httpTrace("context:%p, fd:%d, ip:%s, epoll use finished, usedByEpoll:%d, usedByApp:%d", + pContext, pContext->fd, pContext->ipstr, pContext->usedByEpoll, pContext->usedByApp); + + if (pContext->fd >= 0) { + epoll_ctl(pThread->pollFd, EPOLL_CTL_DEL, pContext->fd, NULL); + taosCloseSocket(pContext->fd); + pContext->fd = -1; + } + + if (!pContext->usedByApp) { + httpCleanUpContext(pThread, pContext); + } else { + pthread_mutex_unlock(&pContext->mutex); + } +} + +void httpCleanUpConnect(HttpServer *pServer) { + int i; + HttpThread *pThread; + + if (pServer == NULL) return; + + pthread_cancel(pServer->thread); + pthread_join(pServer->thread, NULL); + + for (i = 0; i < pServer->numOfThreads; ++i) { + pThread = pServer->pThreads + i; + taosCloseSocket(pThread->pollFd); + + pthread_mutex_lock(&pThread->threadMutex); + while (pThread->pHead) { + httpCleanUpContext(pThread, pThread->pHead); + pThread->pHead = pThread->pHead; + } + pthread_mutex_unlock(&pThread->threadMutex); + + pthread_cancel(pThread->thread); + pthread_join(pThread->thread, NULL); + pthread_cond_destroy(&(pThread->fdReady)); + pthread_mutex_destroy(&(pThread->threadMutex)); + } + + tfree(pServer->pThreads); + httpTrace("http server:%s is cleaned up", pServer->label); +} + +// read all the data, then just discard it +void httpReadDirtyData(int fd) { + char data[1024] = {0}; + int len = (int)taosReadSocket(fd, data, 1024); + while (len >= sizeof(data)) { + len = (int)taosReadSocket(fd, data, 1024); + } +} + +bool httpReadDataImp(HttpContext *pContext) { + HttpParser *pParser = &pContext->pThread->parser; + + int blocktimes = 0; + while (pParser->bufsize <= 
(HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) { + int nread = (int)taosReadSocket(pContext->fd, pParser->buffer + pParser->bufsize, HTTP_STEP_SIZE); + if (nread >= 0 && nread < HTTP_STEP_SIZE) { + pParser->bufsize += nread; + break; + } else if (nread < 0) { + if (errno == EINTR) { + if (blocktimes++ > 1000) { + httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, EINTER too many times", + pContext, pContext->fd, pContext->ipstr, errno); + break; + } + continue; + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { + taosMsleep(1); + if (blocktimes++ > 1000) { + httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, EAGAIN too many times", + pContext, pContext->fd, pContext->ipstr, errno); + break; + } + continue; + } else { + httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect", + pContext, pContext->fd, pContext->ipstr, errno); + return false; + } + } else { + pParser->bufsize += nread; + } + + if (pParser->bufsize >= (HTTP_BUFFER_SIZE - HTTP_STEP_SIZE)) { + httpReadDirtyData(pContext->fd); + httpError("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, request big than:%d", + pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, + HTTP_BUFFER_SIZE); + httpSendErrorResp(pContext, HTTP_REQUSET_TOO_BIG); + return false; + } + } + + pParser->buffer[pParser->bufsize] = 0; + httpDump("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, content:\n%s", + pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, + pParser->bufsize, pParser->buffer); + + return true; +} + +bool httpReadData(HttpContext *pContext) { + HttpParser *pParser = &pContext->pThread->parser; + memset(pParser, 0, sizeof(HttpParser)); + pParser->pCur = pParser->pLast = pParser->buffer = pContext->pThread->buffer; + return httpReadDataImp(pContext); +} + +void httpProcessHttpData(void *param) { + HttpThread * pThread = (HttpThread *)param; + HttpContext *pContext; + int fdNum; + + sigset_t set; + sigemptyset(&set); + sigaddset(&set, SIGPIPE); + pthread_sigmask(SIG_SETMASK, &set, NULL); + + while (1) { + pthread_mutex_lock(&pThread->threadMutex); + if (pThread->numOfFds < 1) { + pthread_cond_wait(&pThread->fdReady, &pThread->threadMutex); + } + pthread_mutex_unlock(&pThread->threadMutex); + + struct epoll_event events[HTTP_MAX_EVENTS]; + //-1 means uncertainty, 0-nowait, 1-wait 1 ms, set it from -1 to 1 + fdNum = epoll_wait(pThread->pollFd, events, HTTP_MAX_EVENTS, 1); + if (fdNum <= 0) continue; + + for (int i = 0; i < fdNum; ++i) { + pContext = events[i].data.ptr; + if (pContext->signature != pContext || pContext->pThread != pThread || pContext->fd <= 0) { + continue; + } + + if (events[i].events & EPOLLPRI) { + httpTrace("context:%p, fd:%d, ip:%s, EPOLLPRI events occured, close connect", pContext, pContext->fd, + pContext->ipstr); + httpCloseContextByServer(pThread, pContext); + continue; + } + + if (events[i].events & EPOLLRDHUP) { + httpTrace("context:%p, fd:%d, ip:%s, EPOLLRDHUP events occured, close connect", + pContext, pContext->fd, pContext->ipstr); + httpCloseContextByServer(pThread, pContext); + continue; + } + + if (events[i].events & EPOLLERR) { + httpTrace("context:%p, fd:%d, ip:%s, EPOLLERR events occured, close connect", pContext, pContext->fd, + pContext->ipstr); + httpCloseContextByServer(pThread, pContext); + continue; + } + + if (events[i].events & EPOLLHUP) { + httpTrace("context:%p, fd:%d, ip:%s, EPOLLHUP events occured, close connect", pContext, pContext->fd, + 
pContext->ipstr); + httpCloseContextByServer(pThread, pContext); + continue; + } + + if (pContext->usedByApp) { + httpTrace("context:%p, fd:%d, ip:%s, still used by app, accessTimes:%d, try again", + pContext, pContext->fd, pContext->ipstr, pContext->accessTimes); + continue; + } + + if (!httpReadData(pContext)) { + httpTrace("context:%p, fd:%d, ip:%s, read data error", pContext, pContext->fd, pContext->ipstr); + httpCloseContextByServer(pThread, pContext); + continue; + } + + if (!pContext->pThread->pServer->online) { + httpSendErrorResp(pContext, HTTP_SERVER_OFFLINE); + httpTrace("context:%p, fd:%d, ip:%s, server is not online", pContext, pContext->fd, pContext->ipstr); + httpCloseContextByServer(pThread, pContext); + continue; + } + + __sync_fetch_and_add(&pThread->pServer->requestNum, 1); + + if (!(*(pThread->processData))(pContext)) { + httpError("context:%p, fd:%d, ip:%s, app force closed", pContext, pContext->fd, pContext->ipstr, + pContext->accessTimes); + httpCloseContextByServer(pThread, pContext); + } + } + } +} + +void httpAcceptHttpConnection(void *arg) { + int connFd = -1; + struct sockaddr_in clientAddr; + int sockFd; + int threadId = 0; + HttpThread * pThread; + HttpServer * pServer; + HttpContext * pContext; + + pServer = (HttpServer *)arg; + + sigset_t set; + sigemptyset(&set); + sigaddset(&set, SIGPIPE); + pthread_sigmask(SIG_SETMASK, &set, NULL); + + sockFd = taosOpenTcpServerSocket(pServer->serverIp, pServer->serverPort); + + if (sockFd < 0) { + httpError("http server:%s, failed to open http socket, ip:%s:%u", pServer->label, pServer->serverIp, + pServer->serverPort); + return; + } else { + httpPrint("http service init success at ip:%s:%u", pServer->serverIp, pServer->serverPort); + pServer->online = true; + } + + while (1) { + socklen_t addrlen = sizeof(clientAddr); + connFd = (int)accept(sockFd, (struct sockaddr *)&clientAddr, &addrlen); + + if (connFd < 3) { + httpError("http server:%s, accept connect failure, errno:%d, reason:%s", pServer->label, errno, strerror(errno)); + continue; + } + + taosKeepTcpAlive(connFd); + taosSetNonblocking(connFd, 1); + + // pick up the thread to handle this connection + pThread = pServer->pThreads + threadId; + + pContext = httpCreateContext(pServer); + if (pContext == NULL) { + httpError("fd:%d, ip:%s:%u, no enough resource to allocate http context", connFd, inet_ntoa(clientAddr.sin_addr), + htons(clientAddr.sin_port)); + taosCloseSocket(connFd); + continue; + } + + httpTrace("context:%p, fd:%d, ip:%s:%u, thread:%s, accept a new connection", pContext, connFd, + inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port), pThread->label); + + pContext->fd = connFd; + sprintf(pContext->ipstr, "%s:%d", inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port)); + pContext->pThread = pThread; + + struct epoll_event event; +// add this new FD into epoll +#ifndef _NINGSI_VERSION + event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP | EPOLLERR | EPOLLHUP | EPOLLRDHUP; +#else + event.events = EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP; +#endif + + event.data.ptr = pContext; + if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, connFd, &event) < 0) { + httpError("context:%p, fd:%d, ip:%s:%u, thread:%s, failed to add http fd for epoll, error:%s", + pContext, connFd, inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port), pThread->label, + strerror(errno)); + httpFreeContext(pThread->pServer, pContext); + tclose(connFd); + continue; + } + + // notify the data process, add into the FdObj list + pthread_mutex_lock(&(pThread->threadMutex)); 
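+    // Under threadMutex: link the new context at the head of this thread's doubly
+    // linked context list, bump numOfFds, and signal fdReady so the worker in
+    // httpProcessHttpData() wakes from pthread_cond_wait() and its epoll loop
+    // starts serving the newly accepted connection.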
+ + pContext->next = pThread->pHead; + + if (pThread->pHead) (pThread->pHead)->prev = pContext; + + pThread->pHead = pContext; + + pThread->numOfFds++; + pthread_cond_signal(&pThread->fdReady); + + pthread_mutex_unlock(&(pThread->threadMutex)); + + httpTrace("context:%p, fd:%d, ip:%s:%u, thread:%s, numOfFds:%d, begin read request", + pContext, connFd, inet_ntoa(clientAddr.sin_addr), htons(clientAddr.sin_port), pThread->label, + pThread->numOfFds); + + // pick up next thread for next connection + threadId++; + threadId = threadId % pServer->numOfThreads; + } +} + +bool httpInitConnect(HttpServer *pServer) { + int i; + pthread_attr_t thattr; + HttpThread * pThread; + + pServer->pThreads = (HttpThread *)malloc(sizeof(HttpThread) * (size_t)pServer->numOfThreads); + if (pServer->pThreads == NULL) { + httpError("init error no enough memory"); + return false; + } + memset(pServer->pThreads, 0, sizeof(HttpThread) * (size_t)pServer->numOfThreads); + + pThread = pServer->pThreads; + for (i = 0; i < pServer->numOfThreads; ++i) { + sprintf(pThread->label, "%s%d", pServer->label, i); + pThread->pServer = pServer; + pThread->processData = pServer->processData; + pThread->threadId = i; + + if (pthread_mutex_init(&(pThread->threadMutex), NULL) < 0) { + httpError("http thread:%s, failed to init HTTP process data mutex, reason:%s", pThread->label, strerror(errno)); + return false; + } + + if (pthread_cond_init(&(pThread->fdReady), NULL) != 0) { + httpError("http thread:%s, init HTTP condition variable failed, reason:%s\n", pThread->label, strerror(errno)); + return false; + } + + pThread->pollFd = epoll_create(HTTP_MAX_EVENTS); // size does not matter + if (pThread->pollFd < 0) { + httpError("http thread:%s, failed to create HTTP epoll", pThread->label); + return false; + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(&(pThread->thread), &thattr, (void *)httpProcessHttpData, (void *)(pThread)) != 0) { + httpError("http thread:%s, failed to create HTTP process data thread, reason:%s", + pThread->label, strerror(errno)); + return false; + } + + httpTrace("http thread:%p:%s, initialized", pThread, pThread->label); + pThread++; + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(&(pServer->thread), &thattr, (void *)httpAcceptHttpConnection, (void *)(pServer)) != 0) { + httpError("http server:%s, failed to create Http accept thread, reason:%s", pServer->label, strerror(errno)); + return false; + } + + pthread_attr_destroy(&thattr); + + httpTrace("http server:%s, initialized, ip:%s:%u, numOfThreads:%d", pServer->label, pServer->serverIp, + pServer->serverPort, pServer->numOfThreads); + return true; +} diff --git a/src/modules/http/src/httpSession.c b/src/modules/http/src/httpSession.c new file mode 100644 index 000000000000..c13cc4c83b9f --- /dev/null +++ b/src/modules/http/src/httpSession.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. 
If not, see . + */ + +#include +#include +#include + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" + +#include "shash.h" +#include "taos.h" +#include "ttime.h" +#include "ttimer.h" + +void httpAccessSession(HttpContext *pContext) { + if (pContext->session == pContext->session->signature) + pContext->session->expire = (int)taosGetTimestampSec() + pContext->pThread->pServer->sessionExpire; +} + +void httpCreateSession(HttpContext *pContext, void *taos) { + HttpServer *server = pContext->pThread->pServer; + pthread_mutex_lock(&server->serverMutex); + + if (pContext->session != NULL && pContext->session == pContext->session->signature) { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, set exist session:%p:%s:%p expired", pContext, pContext->fd, + pContext->ipstr, pContext->user, pContext->session, pContext->session->id, pContext->session->taos); + pContext->session->expire = 0; + pContext->session->access--; + } + + HttpSession session; + session.taos = taos; + session.expire = (int)taosGetTimestampSec() + server->sessionExpire; + session.access = 1; + strcpy(session.id, pContext->user); + pContext->session = (HttpSession *)taosAddStrHash(server->pSessionHash, session.id, (char *)(&session)); + if (pContext->session == NULL) { + httpError("context:%p, fd:%d, ip:%s, user:%s, error:%s", pContext, pContext->fd, pContext->ipstr, pContext->user, + httpMsg[HTTP_SESSION_FULL]); + taos_close(taos); + } + + pContext->session->signature = pContext->session; + httpTrace("context:%p, fd:%d, ip:%s, user:%s, create a new session:%p:%s:%p", pContext, pContext->fd, pContext->ipstr, + pContext->user, pContext->session, pContext->session->id, pContext->session->taos); + pthread_mutex_unlock(&server->serverMutex); +} + +void httpFetchSession(HttpContext *pContext) { + HttpServer *server = pContext->pThread->pServer; + pthread_mutex_lock(&server->serverMutex); + + pContext->session = (HttpSession *)taosGetStrHashData(server->pSessionHash, pContext->user); + if (pContext->session != NULL && pContext->session == pContext->session->signature) { + pContext->session->access++; + httpTrace("context:%p, fd:%d, ip:%s, user:%s, find an exist session:%p:%s:%p, access:%d, expire:%d", + pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session, pContext->session->id, + pContext->session->taos, pContext->session->access, pContext->session->expire); + pContext->session->expire = (int)taosGetTimestampSec() + server->sessionExpire; + } else { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, session not found", pContext, pContext->fd, pContext->ipstr, + pContext->user); + } + + pthread_mutex_unlock(&server->serverMutex); +} + +void httpRestoreSession(HttpContext *pContext) { + HttpServer * server = pContext->pThread->pServer; + HttpSession *session = pContext->session; + if (session == NULL || session != session->signature) return; + + pthread_mutex_lock(&server->serverMutex); + session->access--; + httpTrace("context:%p, fd:%d, ip:%s, user:%s, restore session:%p:%s:%p, access:%d, expire:%d", + pContext, pContext->fd, pContext->ipstr, pContext->user, session, session->id, session->taos, + session->access, pContext->session->expire); + pthread_mutex_unlock(&server->serverMutex); +} + +void httpResetSession(char *session) { + HttpSession *pSession = (HttpSession *)session; + httpTrace("close session:%p:%s:%p", pSession, pSession->id, pSession->taos); + if (pSession->taos != NULL) { + taos_close(pSession->taos); + pSession->taos = NULL; + } + pSession->signature = NULL; +} 
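+
+/*
+ * Session cleanup works in two layers:
+ *  - httpInitAllSessions() arms a timer that periodically fires httpProcessSessionExpire().
+ *  - httpRemoveExpireSessions() then walks pSessionHash with httpSessionExpired(), resetting
+ *    and deleting sessions that have passed their expire time and are no longer accessed,
+ *    a small batch per pass, before the timer is re-armed.
+ * httpRemoveAllSessions() below is the teardown path: it drops every entry unconditionally.
+ */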
+ +void httpRemoveAllSessions(HttpServer *pServer) { + if (pServer->pSessionHash != NULL) { + taosCleanUpStrHashWithFp(pServer->pSessionHash, httpResetSession); + pServer->pSessionHash = NULL; + } +} + +bool httpInitAllSessions(HttpServer *pServer) { + if (pServer->pSessionHash == NULL) { + pServer->pSessionHash = taosInitStrHash(100, sizeof(HttpSession), taosHashStringStep1); + } + if (pServer->pSessionHash == NULL) { + httpError("http init session pool failed"); + return false; + } + if (pServer->expireTimer == NULL) { + taosTmrReset(httpProcessSessionExpire, 50000, pServer, pServer->timerHandle, &pServer->expireTimer); + } + + return true; +} + +int httpSessionExpired(char *session) { + HttpSession *pSession = (HttpSession *)session; + time_t cur = time(NULL); + + if (pSession->taos != NULL) { + if (pSession->expire > cur) { + return 0; // un-expired, so return false + } + if (pSession->access > 0) { + httpTrace("session:%p:%s:%p is expired, but still access:%d", pSession, pSession->id, pSession->taos, + pSession->access); + return 0; // still used, so return false + } + httpTrace("need close session:%p:%s:%p for it expired, cur:%d, expire:%d, invertal:%d", + pSession, pSession->id, pSession->taos, cur, pSession->expire, cur - pSession->expire); + } + + return 1; +} + +void httpRemoveExpireSessions(HttpServer *pServer) { + int expiredNum = 0; + do { + pthread_mutex_lock(&pServer->serverMutex); + + HttpSession *pSession = (HttpSession *)taosVisitStrHashWithFp(pServer->pSessionHash, httpSessionExpired); + if (pSession == NULL) { + pthread_mutex_unlock(&pServer->serverMutex); + break; + } + + httpResetSession((char *)pSession); + taosDeleteStrHashNode(pServer->pSessionHash, pSession->id, pSession); + + pthread_mutex_unlock(&pServer->serverMutex); + + if (++expiredNum > 10) { + break; + } + } while (true); +} + +void httpProcessSessionExpire(void *handle, void *tmrId) { + HttpServer *pServer = (HttpServer *)handle; + httpRemoveExpireSessions(pServer); + taosTmrReset(httpProcessSessionExpire, 60000, pServer, pServer->timerHandle, &pServer->expireTimer); +} \ No newline at end of file diff --git a/src/modules/http/src/httpSql.c b/src/modules/http/src/httpSql.c new file mode 100644 index 000000000000..bfd6189539da --- /dev/null +++ b/src/modules/http/src/httpSql.c @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" +#include "taos.h" +#include "tsclient.h" + +void *taos_connect_a(char *ip, char *user, char *pass, char *db, int port, void (*fp)(void *, TAOS_RES *, int), + void *param, void **taos); +void httpProcessMultiSql(HttpContext *pContext); + +void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) { + HttpContext *pContext = (HttpContext *)param; + if (pContext == NULL || pContext->signature != pContext) return; + + HttpSqlCmds * multiCmds = pContext->multiCmds; + HttpEncodeMethod *encode = pContext->encodeMethod; + + HttpSqlCmd *singleCmd = multiCmds->cmds + multiCmds->pos; + char * sql = httpGetCmdsString(pContext, singleCmd->sql); + + bool isContinue = false; + + if (numOfRows > 0) { + if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->buildQueryJsonFp) { + isContinue = (encode->buildQueryJsonFp)(pContext, singleCmd, result, numOfRows); + } + } + + if (isContinue) { + // retrieve next batch of rows + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, continue retrieve, numOfRows:%d, sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, numOfRows, sql); + taos_fetch_rows_a(result, httpProcessMultiSqlRetrieveCallBack, param); + } else { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, stop retrieve, numOfRows:%d, sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, numOfRows, sql); + + if (numOfRows < 0) { + httpError("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, retrieve failed code:%d, sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, -numOfRows, sql); + } else { + taos_free_result(result); + } + + if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->stopJsonFp) { + (encode->stopJsonFp)(pContext, singleCmd); + } + multiCmds->pos++; + httpProcessMultiSql(pContext); + } +} + +void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) { + HttpContext *pContext = (HttpContext *)param; + if (pContext == NULL || pContext->signature != pContext) return; + + HttpSqlCmds * multiCmds = pContext->multiCmds; + HttpEncodeMethod *encode = pContext->encodeMethod; + + HttpSqlCmd *singleCmd = multiCmds->cmds + multiCmds->pos; + char * sql = httpGetCmdsString(pContext, singleCmd->sql); + + if (-code == TSDB_CODE_ACTION_IN_PROGRESS) { + httpWarn("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, code:%d:inprogress, sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, -code, sql); + return; + } + + if (code < 0) { + if (encode->checkFinishedFp != NULL && !encode->checkFinishedFp(pContext, singleCmd, code >= 0 ? 
0 : -code)) { + singleCmd->code = -code; + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos jump to:%d, last code:%d, last sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos + 1, -code, sql); + } else { + singleCmd->code = -code; + httpError("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, error code:%d, sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, -code, sql); + + if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN) { + if (encode->startJsonFp) (encode->startJsonFp)(pContext, singleCmd, result); + if (encode->stopJsonFp) (encode->stopJsonFp)(pContext, singleCmd); + } + } + multiCmds->pos++; + httpProcessMultiSql(pContext); + return; + } + + if (result == NULL) { + // not select or show commands + int affectRows = code; + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, affect rows:%d, sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, affectRows, sql); + + singleCmd->code = 0; + + if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->startJsonFp) { + (encode->startJsonFp)(pContext, singleCmd, result); + } + + if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->buildAffectRowJsonFp) { + (encode->buildAffectRowJsonFp)(pContext, singleCmd, affectRows); + } + + if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->stopJsonFp) { + (encode->stopJsonFp)(pContext, singleCmd); + } + + if (encode->setNextCmdFp) { + (encode->setNextCmdFp)(pContext, singleCmd, code); + } else { + multiCmds->pos++; + } + + httpProcessMultiSql(pContext); + } else { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start retrieve, sql:%s", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, sql); + + if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->startJsonFp) { + (encode->startJsonFp)(pContext, singleCmd, result); + } + taos_fetch_rows_a(result, httpProcessMultiSqlRetrieveCallBack, pContext); + } +} + +void httpProcessMultiSql(HttpContext *pContext) { + HttpSqlCmds * multiCmds = pContext->multiCmds; + HttpEncodeMethod *encode = pContext->encodeMethod; + + if (multiCmds->pos >= multiCmds->size) { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, size:%d, stop mulit-querys", + pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, multiCmds->size); + if (encode->cleanJsonFp) { + (encode->cleanJsonFp)(pContext); + } + httpCloseContextByApp(pContext); + return; + } + + HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->pos; + + char *sql = httpGetCmdsString(pContext, cmd->sql); + httpDump("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd, + pContext->ipstr, pContext->user, multiCmds->pos, sql); + taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext); +} + +void httpProcessMultiSqlCmd(HttpContext *pContext) { + if (pContext == NULL || pContext->signature != pContext) return; + + HttpSqlCmds *multiCmds = pContext->multiCmds; + if (multiCmds == NULL || multiCmds->size <= 0 || multiCmds->pos >= multiCmds->size || multiCmds->pos < 0) { + httpSendErrorResp(pContext, HTTP_INVALID_MULTI_REQUEST); + return; + } + + httpTrace("context:%p, fd:%d, ip:%s, user:%s, start multi-querys pos:%d, size:%d", pContext, pContext->fd, + pContext->ipstr, pContext->user, multiCmds->pos, multiCmds->size); + HttpEncodeMethod *encode = pContext->encodeMethod; + if 
(encode->initJsonFp) { + (encode->initJsonFp)(pContext); + } + + httpProcessMultiSql(pContext); +} + +void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) { + HttpContext *pContext = (HttpContext *)param; + if (pContext == NULL || pContext->signature != pContext) return; + + HttpEncodeMethod *encode = pContext->encodeMethod; + + bool isContinue = false; + + if (numOfRows > 0) { + if (encode->buildQueryJsonFp) { + isContinue = (encode->buildQueryJsonFp)(pContext, &pContext->singleCmd, result, numOfRows); + } + } + + if (isContinue) { + // retrieve next batch of rows + httpTrace("context:%p, fd:%d, ip:%s, user:%s, continue retrieve, numOfRows:%d", pContext, pContext->fd, + pContext->ipstr, pContext->user, numOfRows); + taos_fetch_rows_a(result, httpProcessSingleSqlRetrieveCallBack, param); + } else { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, stop retrieve, numOfRows:%d", pContext, pContext->fd, pContext->ipstr, + pContext->user, numOfRows); + + if (numOfRows < 0) { + httpError("context:%p, fd:%d, ip:%s, user:%s, retrieve failed, code:%d", pContext, pContext->fd, pContext->ipstr, + pContext->user, -numOfRows); + } else { + taos_free_result(result); + } + + if (encode->stopJsonFp) { + (encode->stopJsonFp)(pContext, &pContext->singleCmd); + } + + httpCloseContextByApp(pContext); + } +} + +void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) { + HttpContext *pContext = (HttpContext *)param; + if (pContext == NULL || pContext->signature != pContext) return; + + HttpEncodeMethod *encode = pContext->encodeMethod; + + if (-code == TSDB_CODE_ACTION_IN_PROGRESS) { + httpError("context:%p, fd:%d, ip:%s, user:%s, query error, taos:%p, code:%d:inprogress, sqlObj:%p", + pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session->taos, -code, (SSqlObj *)result); + return; + } + + if (code < 0) { + SSqlObj *pObj = (SSqlObj *)result; + httpError("context:%p, fd:%d, ip:%s, user:%s, query error, taos:%p, code:%d, sqlObj:%p", + pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session->taos, -code, pObj); + + httpSendTaosdErrorResp(pContext, -code); + return; + } + + if (result == NULL) { + // not select or show commands + int affectRows = code; + + httpTrace("context:%p, fd:%d, ip:%s, user:%s, affect rows:%d, stop query, sqlObj:%p", + pContext, pContext->fd, pContext->ipstr, pContext->user, affectRows, result); + + if (encode->startJsonFp) { + (encode->startJsonFp)(pContext, &pContext->singleCmd, result); + } + + if (encode->buildAffectRowJsonFp) { + (encode->buildAffectRowJsonFp)(pContext, &pContext->singleCmd, affectRows); + } + + if (encode->stopJsonFp) { + (encode->stopJsonFp)(pContext, &pContext->singleCmd); + } + + httpCloseContextByApp(pContext); + } else { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, start retrieve", pContext, pContext->fd, pContext->ipstr, + pContext->user); + + if (encode->startJsonFp) { + (encode->startJsonFp)(pContext, &pContext->singleCmd, result); + } + + taos_fetch_rows_a(result, httpProcessSingleSqlRetrieveCallBack, pContext); + } +} + +void httpProcessSingleSqlCmd(HttpContext *pContext) { + HttpSqlCmd * cmd = &pContext->singleCmd; + char * sql = cmd->nativSql; + HttpSession *pSession = pContext->session; + + if (sql == NULL || sql[0] == 0) { + httpError("context:%p, fd:%d, ip:%s, user:%s, error:no sql input", pContext, pContext->fd, pContext->ipstr, + pContext->user); + httpSendErrorResp(pContext, HTTP_NO_SQL_INPUT); + return; + } + + httpDump("context:%p, fd:%d, ip:%s, user:%s, 
sql:%s, start query", pContext, pContext->fd, pContext->ipstr, + pContext->user, sql); + taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext); +} + +void httpProcessLoginCmd(HttpContext *pContext) { + char token[128] = "current version only supports basic authorization, no token returned"; + httpTrace("context:%p, fd:%d, ip:%s, user:%s, return token:%s", pContext, pContext->fd, pContext->ipstr, + pContext->user, token); + httpTrace("user:%s login from %s via http", pContext->user, pContext->ipstr); + httpSendSuccResp(pContext, token); +} + +void httpProcessHeartBeatCmd(HttpContext *pContext) { + HttpEncodeMethod *encode = pContext->encodeMethod; + if (encode->startJsonFp) { + (encode->startJsonFp)(pContext, &pContext->singleCmd, NULL); + } + if (encode->stopJsonFp) { + (encode->stopJsonFp)(pContext, &pContext->singleCmd); + } + httpCloseContextByApp(pContext); +} + +void httpExecCmd(HttpContext *pContext) { + switch (pContext->reqType) { + case HTTP_REQTYPE_LOGIN: + httpProcessLoginCmd(pContext); + break; + case HTTP_REQTYPE_SINGLE_SQL: + httpProcessSingleSqlCmd(pContext); + break; + case HTTP_REQTYPE_MULTI_SQL: + httpProcessMultiSqlCmd(pContext); + break; + case HTTP_REQTYPE_HEARTBEAT: + httpProcessHeartBeatCmd(pContext); + break; + case HTTP_REQTYPE_OTHERS: + httpCloseContextByApp(pContext); + break; + default: + httpCloseContextByApp(pContext); + break; + } +} + +void httpProcessRequestCb(void *param, TAOS_RES *result, int code) { + HttpContext *pContext = param; + if (pContext == NULL || pContext->signature != pContext) return; + + if (code < 0) { + httpError("context:%p, fd:%d, ip:%s, user:%s, login error, code:%d", pContext, pContext->fd, pContext->ipstr, + pContext->user, -code); + httpSendTaosdErrorResp(pContext, -code); + return; + } + + httpTrace("context:%p, fd:%d, ip:%s, user:%s, connect tdengine success, taos:%p", pContext, pContext->fd, + pContext->ipstr, pContext->user, pContext->taos); + if (pContext->taos == NULL) { + httpError("context:%p, fd:%d, ip:%s, user:%s, login error, taos is empty", pContext, pContext->fd, pContext->ipstr, + pContext->user); + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_SESSIONS); + return; + } + + httpCreateSession(pContext, pContext->taos); + + if (pContext->session == NULL) { + httpSendErrorResp(pContext, HTTP_SESSION_FULL); + httpCloseContextByApp(pContext); + } else { + httpExecCmd(pContext); + } +} + +void httpProcessRequest(HttpContext *pContext) { + if (pContext->session == NULL) { + httpFetchSession(pContext); + } + + if (pContext->session == NULL || pContext->session != pContext->session->signature || + pContext->reqType == HTTP_REQTYPE_LOGIN) { + taos_connect_a(NULL, pContext->user, pContext->pass, "", 0, httpProcessRequestCb, (void *)pContext, + &(pContext->taos)); + httpTrace("context:%p, fd:%d, ip:%s, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, + pContext->ipstr, pContext->user, pContext->taos); + } else { + httpAccessSession(pContext); + httpExecCmd(pContext); + } +} diff --git a/src/modules/http/src/httpSystem.c b/src/modules/http/src/httpSystem.c new file mode 100644 index 000000000000..43923d985c72 --- /dev/null +++ b/src/modules/http/src/httpSystem.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" +#include "shash.h" +#include "taos.h" +#include "tglobalcfg.h" +#include "tsocket.h" +#include "ttimer.h" + +#include "gcHandle.h" +#include "httpHandle.h" +#include "restHandle.h" +#include "tgHandle.h" + +static HttpServer *httpServer = NULL; + +int httpInitSystem() { + taos_init(); + + httpServer = (HttpServer *)malloc(sizeof(HttpServer)); + memset(httpServer, 0, sizeof(HttpServer)); + + strcpy(httpServer->label, "taosh"); + strcpy(httpServer->serverIp, tsHttpIp); + httpServer->serverPort = tsHttpPort; + httpServer->cacheContext = tsHttpCacheSessions; + httpServer->sessionExpire = tsHttpSessionExpire; + httpServer->numOfThreads = tsHttpMaxThreads; + httpServer->processData = httpProcessData; + + pthread_mutex_init(&httpServer->serverMutex, NULL); + + restInitHandle(httpServer); + gcInitHandle(httpServer); + tgInitHandle(httpServer); + + return 0; +} + +int httpStartSystem() { + httpPrint("starting to initialize http service ..."); + + if (httpServer == NULL) { + httpError("http server is null"); + return -1; + } + + if (httpServer->pContextPool == NULL) { + httpServer->pContextPool = taosMemPoolInit(httpServer->cacheContext, sizeof(HttpContext)); + } + if (httpServer->pContextPool == NULL) { + httpError("http init context pool failed"); + return -1; + } + + if (httpServer->timerHandle == NULL) { + httpServer->timerHandle = taosTmrInit(5, 1000, 60000, "http"); + } + if (httpServer->timerHandle == NULL) { + httpError("http init timer failed"); + return -1; + } + + if (!httpInitAllSessions(httpServer)) { + httpError("http init session failed"); + return -1; + } + + if (!httpInitConnect(httpServer)) { + httpError("http init server failed"); + return -1; + } + + return 0; +} + +void httpStopSystem() { + if (httpServer != NULL) { + httpServer->online = false; + } +} + +void httpCleanUpSystem() { + httpPrint("http service cleanup"); + httpStopSystem(); +#if 0 + if (httpServer == NULL) { + return; + } + + if (httpServer->expireTimer != NULL) { + taosTmrStopA(&(httpServer->expireTimer)); + } + + if (httpServer->timerHandle != NULL) { + taosTmrCleanUp(httpServer->timerHandle); + httpServer->timerHandle = NULL; + } + + httpCleanUpConnect(httpServer); + httpRemoveAllSessions(httpServer); + + if (httpServer->pContextPool != NULL) { + taosMemPoolCleanUp(httpServer->pContextPool); + httpServer->pContextPool = NULL; + } + + pthread_mutex_destroy(&httpServer->serverMutex); + + tfree(httpServer); +#endif +} + +void httpGetReqCount(int32_t *httpReqestNum) { + if (httpServer != NULL) { + *httpReqestNum = __sync_fetch_and_and(&httpServer->requestNum, 0); + } else { + *httpReqestNum = 0; + } +} diff --git a/src/modules/http/src/httpUtil.c b/src/modules/http/src/httpUtil.c new file mode 100644 index 000000000000..85cda3239e06 --- /dev/null +++ b/src/modules/http/src/httpUtil.c @@ -0,0 +1,359 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
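/*
 * For illustration (a sketch, not part of the module itself): httpGetReqCount() above reads and resets
 * the request counter in a single atomic step. ANDing with 0 returns the previous value and leaves 0
 * behind, so increments racing with the read are not lost. A minimal standalone equivalent using the
 * same GCC builtins (the function names here are made up):
 *
 *   #include <stdint.h>
 *   static int32_t requestNum = 0;
 *   void    countRequest(void)       { __sync_fetch_and_add(&requestNum, 1); }
 *   int32_t drainRequestCount(void)  { return __sync_fetch_and_and(&requestNum, 0); }
 */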
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include "tmd5.h" + +#include "http.h" +#include "httpCode.h" +#include "httpHandle.h" +#include "httpResp.h" + +#include "shash.h" +#include "taos.h" + +bool httpCheckUsedbSql(char *sql) { + if (strstr(sql, "use ") != NULL) { + return true; + } + return false; +} + +void httpTimeToString(time_t t, char *buf, int buflen) { + memset(buf, 0, (size_t)buflen); + char ts[30] = {0}; + + struct tm *ptm; + time_t tt = t / 1000; + ptm = localtime(&tt); + strftime(ts, 64, "%Y-%m-%d %H:%M:%S", ptm); + sprintf(buf, "%s.%03ld", ts, t % 1000); +} + +int32_t httpAddToSqlCmdBuffer(HttpContext *pContext, const char *const format, ...) { + HttpSqlCmds *cmd = pContext->multiCmds; + if (cmd->buffer == NULL) return -1; + + int remainLength = cmd->bufferSize - cmd->bufferPos; + if (remainLength < 4096) { + if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1; + } + + char *buffer = cmd->buffer + cmd->bufferPos; + int len = 0; + + va_list argpointer; + va_start(argpointer, format); + len += vsnprintf(buffer, (size_t)remainLength, format, argpointer); + va_end(argpointer); + + if (cmd->bufferPos + len + 1 >= cmd->bufferSize) { + return -1; + } + + cmd->buffer[cmd->bufferPos + len] = 0; + cmd->bufferPos = cmd->bufferPos + len + 1; + + remainLength = cmd->bufferSize - cmd->bufferPos; + if (remainLength < 4096) { + if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1; + } + + return (int32_t)(buffer - cmd->buffer); +} + +int32_t httpAddToSqlCmdBufferNoTerminal(HttpContext *pContext, const char *const format, ...) 
{ + HttpSqlCmds *cmd = pContext->multiCmds; + if (cmd->buffer == NULL) return -1; + + int remainLength = cmd->bufferSize - cmd->bufferPos; + if (remainLength < 4096) { + if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1; + } + + char *buffer = cmd->buffer + cmd->bufferPos; + int len = 0; + + va_list argpointer; + va_start(argpointer, format); + len += vsnprintf(buffer, (size_t)remainLength, format, argpointer); + va_end(argpointer); + + if (cmd->bufferPos + len + 1 >= cmd->bufferSize) { + return -1; + } + + cmd->bufferPos = cmd->bufferPos + len; + + remainLength = cmd->bufferSize - cmd->bufferPos; + if (remainLength < 4096) { + if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1; + } + + return (int32_t)(buffer - cmd->buffer); +} + +int32_t httpAddToSqlCmdBufferTerminal(HttpContext *pContext) { + HttpSqlCmds *cmd = pContext->multiCmds; + if (cmd->buffer == NULL) return -1; + + int remainLength = cmd->bufferSize - cmd->bufferPos; + if (remainLength < 4096) { + if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1; + } + + char *buffer = cmd->buffer + cmd->bufferPos; + *buffer = 0; + cmd->bufferPos = cmd->bufferPos + 1; + + remainLength = cmd->bufferSize - cmd->bufferPos; + if (remainLength < 4096) { + if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1; + } + + return (int32_t)(buffer - cmd->buffer); +} + +int32_t httpAddToSqlCmdBufferWithSize(HttpContext *pContext, int mallocSize) { + HttpSqlCmds *cmd = pContext->multiCmds; + if (cmd->buffer == NULL) return -1; + + if (cmd->bufferPos + mallocSize >= cmd->bufferSize) { + if (!httpReMallocMultiCmdsBuffer(pContext, cmd->bufferSize * 2)) return -1; + } + + char *buffer = cmd->buffer + cmd->bufferPos; + memset(cmd->buffer + cmd->bufferPos, 0, (size_t)mallocSize); + cmd->bufferPos = cmd->bufferPos + mallocSize; + + return (int32_t)(buffer - cmd->buffer); +} + +bool httpMallocMultiCmds(HttpContext *pContext, int cmdSize, int bufferSize) { + if (cmdSize > HTTP_MAX_CMD_SIZE) { + httpError("context:%p, fd:%d, ip:%s, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, + pContext->ipstr, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); + return false; + } + + if (pContext->multiCmds == NULL) { + pContext->multiCmds = (HttpSqlCmds *)malloc(sizeof(HttpSqlCmds)); + if (pContext->multiCmds == NULL) { + httpError("context:%p, fd:%d, ip:%s, user:%s, malloc multiCmds error", pContext, pContext->fd, pContext->ipstr, + pContext->user); + return false; + } + memset(pContext->multiCmds, 0, sizeof(HttpSqlCmds)); + } + + HttpSqlCmds *multiCmds = pContext->multiCmds; + if (multiCmds->cmds == NULL || cmdSize > multiCmds->maxSize) { + free(multiCmds->cmds); + multiCmds->cmds = (HttpSqlCmd *)malloc((size_t)cmdSize * sizeof(HttpSqlCmd)); + if (multiCmds->cmds == NULL) { + httpError("context:%p, fd:%d, ip:%s, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->ipstr, + pContext->user, cmdSize); + return false; + } + multiCmds->maxSize = (int16_t)cmdSize; + } + + if (multiCmds->buffer == NULL || bufferSize > multiCmds->bufferSize) { + free(multiCmds->buffer); + multiCmds->buffer = (char *)malloc((size_t)bufferSize); + if (multiCmds->buffer == NULL) { + httpError("context:%p, fd:%d, ip:%s, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->ipstr, + pContext->user, bufferSize); + return false; + } + multiCmds->bufferSize = bufferSize; + } + + multiCmds->pos = 0; + multiCmds->size = 0; + multiCmds->bufferPos = 0; + memset(multiCmds->cmds, 0, 
(size_t)multiCmds->maxSize * sizeof(HttpSqlCmd)); + + return true; +} + +bool httpReMallocMultiCmdsSize(HttpContext *pContext, int cmdSize) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + + if (cmdSize > HTTP_MAX_CMD_SIZE) { + httpError("context:%p, fd:%d, ip:%s, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, + pContext->ipstr, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); + return false; + } + + multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + if (multiCmds->cmds == NULL) { + httpError("context:%p, fd:%d, ip:%s, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->ipstr, + pContext->user, cmdSize); + return false; + } + memset(multiCmds->cmds + multiCmds->maxSize * (int16_t)sizeof(HttpSqlCmd), 0, + (size_t)(cmdSize - multiCmds->maxSize) * sizeof(HttpSqlCmd)); + multiCmds->maxSize = (int16_t)cmdSize; + + return true; +} + +bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int bufferSize) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + + if (bufferSize > HTTP_MAX_BUFFER_SIZE) { + httpError("context:%p, fd:%d, ip:%s, user:%s, mulitcmd buffer size:%d large then %d", + pContext, pContext->fd, pContext->ipstr, pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE); + return false; + } + + multiCmds->buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize); + if (multiCmds->buffer == NULL) { + httpError("context:%p, fd:%d, ip:%s, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->ipstr, + pContext->user, bufferSize); + return false; + } + memset(multiCmds->buffer + multiCmds->bufferSize, 0, (size_t)(bufferSize - multiCmds->bufferSize)); + multiCmds->bufferSize = bufferSize; + + return true; +} + +void httpFreeMultiCmds(HttpContext *pContext) { + if (pContext->multiCmds != NULL) { + if (pContext->multiCmds->buffer != NULL) free(pContext->multiCmds->buffer); + if (pContext->multiCmds->cmds != NULL) free(pContext->multiCmds->cmds); + free(pContext->multiCmds); + pContext->multiCmds = NULL; + } +} + +JsonBuf *httpMallocJsonBuf(HttpContext *pContext) { + if (pContext->jsonBuf == NULL) { + pContext->jsonBuf = (JsonBuf *)malloc(sizeof(JsonBuf)); + } + + return pContext->jsonBuf; +} + +void httpFreeJsonBuf(HttpContext *pContext) { + if (pContext->jsonBuf != NULL) { + free(pContext->jsonBuf); + pContext->jsonBuf = 0; + } +} + +bool httpCompareMethod(HttpDecodeMethod *pSrc, HttpDecodeMethod *pCmp) { + if (strcmp(pSrc->module, pCmp->module) != 0) { + return false; + } + return true; +} + +void httpAddMethod(HttpServer *pServer, HttpDecodeMethod *pMethod) { + int pos = 0; + for (pos = 0; pos < pServer->methodScannerLen; ++pos) { + if (httpCompareMethod(pServer->methodScanner[pos], pMethod)) { + break; + } + } + + if (pos == pServer->methodScannerLen && pServer->methodScannerLen < HTTP_METHOD_SCANNER_SIZE) { + pServer->methodScanner[pos] = pMethod; + pServer->methodScannerLen++; + } +} + +HttpSqlCmd *httpNewSqlCmd(HttpContext *pContext) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + if (multiCmds->size >= multiCmds->maxSize) { + if (!httpReMallocMultiCmdsSize(pContext, 2 * multiCmds->maxSize)) return NULL; + } + + HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->size++; + cmd->cmdType = HTTP_CMD_TYPE_UN_SPECIFIED; + cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_WITH_RETURN; + cmd->cmdState = HTTP_CMD_STATE_NOT_RUN_YET; + + return cmd; +} + +HttpSqlCmd *httpCurrSqlCmd(HttpContext *pContext) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + if (multiCmds->size == 0) return NULL; + if 
(multiCmds->size > multiCmds->maxSize) return NULL; + + return multiCmds->cmds + multiCmds->size - 1; +} + +int httpNextSqlCmdPos(HttpContext *pContext) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + return multiCmds->size; +} + +void httpTrimTableName(char *name) { + for (int i = 0; name[i] != 0; i++) { + if (name[i] == ' ' || name[i] == ':' || name[i] == '.' || name[i] == '-' || name[i] == '/' || name[i] == '\'') + name[i] = '_'; + if (i == TSDB_METER_NAME_LEN + 1) { + name[i] = 0; + break; + } + } +} + +int httpShrinkTableName(HttpContext *pContext, int pos, char *name) { + int len = 0; + for (int i = 0; name[i] != 0; i++) { + if (name[i] == ' ' || name[i] == ':' || name[i] == '.' || name[i] == '-' || name[i] == '/' || name[i] == '\'' || + name[i] == '\"') + name[i] = '_'; + len++; + } + + if (len < TSDB_METER_NAME_LEN) { + return pos; + } + + MD5_CTX context; + MD5Init(&context); + MD5Update(&context, (uint8_t *)name, (uint32_t)len); + MD5Final(&context); + + int table_name = httpAddToSqlCmdBuffer( + pContext, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0], + context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6], + context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11], + context.digest[12], context.digest[13], context.digest[14], context.digest[15]); + + if (table_name != -1) { + httpGetCmdsString(pContext, table_name)[0] = 't'; + } + + return table_name; +} + +char *httpGetCmdsString(HttpContext *pContext, int pos) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + if (pos < 0 || pos >= multiCmds->bufferSize) { + return ""; + } + + return multiCmds->buffer + pos; +} \ No newline at end of file diff --git a/src/modules/http/src/restHandle.c b/src/modules/http/src/restHandle.c new file mode 100644 index 000000000000..742b64eee2ff --- /dev/null +++ b/src/modules/http/src/restHandle.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
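/*
 * For illustration (a sketch, not part of the module itself): the httpAddToSqlCmdBuffer*() helpers above
 * hand back offsets into one growable buffer instead of raw pointers, because httpReMallocMultiCmdsBuffer()
 * may realloc and move the buffer; an offset stays valid across the move, a pointer would not. The
 * intended usage pattern looks roughly like this (tableName is a made-up variable):
 *
 *   int32_t pos = httpAddToSqlCmdBuffer(pContext, "select * from %s", tableName);  // store, get an offset
 *   ...                                                                             // buffer may be regrown
 *   char *sql = httpGetCmdsString(pContext, pos);                                   // resolve the offset late
 */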
+ */ + +#include "restHandle.h" +#include "restJson.h" + +static HttpDecodeMethod restDecodeMethod = {"rest", restProcessRequest}; +static HttpDecodeMethod restDecodeMethod2 = {"restful", restProcessRequest}; +static HttpEncodeMethod restEncodeSqlMethod = { + restStartSqlJson, restStopSqlJson, restBuildSqlJson, restBuildSqlAffectRowsJson, NULL, NULL, NULL, NULL}; +static HttpEncodeMethod restEncodeSqlTimeMethod = { + restStartSqlJson, restStopSqlJson, restBuildSqlTimeJson, restBuildSqlAffectRowsJson, NULL, NULL, NULL, NULL}; + +void restInitHandle(HttpServer* pServer) { + httpAddMethod(pServer, &restDecodeMethod); + httpAddMethod(pServer, &restDecodeMethod2); +} + +bool restGetUserFromUrl(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + if (pParser->path[REST_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[REST_USER_URL_POS].len <= 0) { + return false; + } + + strcpy(pContext->user, pParser->path[REST_USER_URL_POS].pos); + return true; +} + +bool restGetPassFromUrl(HttpContext* pContext) { + HttpParser* pParser = &pContext->pThread->parser; + if (pParser->path[REST_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[REST_PASS_URL_POS].len <= 0) { + return false; + } + + strcpy(pContext->pass, pParser->path[REST_PASS_URL_POS].pos); + return true; +} + +bool restProcessLoginRequest(HttpContext* pContext) { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process restful login msg", pContext, pContext->fd, pContext->ipstr, + pContext->user); + pContext->reqType = HTTP_REQTYPE_LOGIN; + return true; +} + +bool restProcessSqlRequest(HttpContext* pContext, int isSqlT) { + httpTrace("context:%p, fd:%d, ip:%s, user:%s, process restful sql msg", pContext, pContext->fd, pContext->ipstr, + pContext->user); + + char* sql = pContext->pThread->parser.data.pos; + if (sql == NULL) { + httpSendErrorResp(pContext, HTTP_NO_SQL_INPUT); + return false; + } + + if (httpCheckUsedbSql(sql)) { + httpSendErrorResp(pContext, HTTP_NO_EXEC_USEDB); + return false; + } + + HttpSqlCmd* cmd = &(pContext->singleCmd); + cmd->nativSql = sql; + + pContext->reqType = HTTP_REQTYPE_SINGLE_SQL; + if (!isSqlT) + pContext->encodeMethod = &restEncodeSqlMethod; + else + pContext->encodeMethod = &restEncodeSqlTimeMethod; + + return true; +} + +bool restProcessRequest(struct HttpContext* pContext) { + if (httpUrlMatch(pContext, REST_ACTION_URL_POS, "login")) { + restGetUserFromUrl(pContext); + restGetPassFromUrl(pContext); + } + + if (strlen(pContext->user) == 0 || strlen(pContext->pass) == 0) { + httpSendErrorResp(pContext, HTTP_PARSE_USR_ERROR); + return false; + } + + if (httpUrlMatch(pContext, REST_ACTION_URL_POS, "sql")) { + return restProcessSqlRequest(pContext, 0); + } else if (httpUrlMatch(pContext, REST_ACTION_URL_POS, "sqlt")) { + return restProcessSqlRequest(pContext, 1); + } else if (httpUrlMatch(pContext, REST_ACTION_URL_POS, "login")) { + return restProcessLoginRequest(pContext); + } else { + } + + httpSendErrorResp(pContext, HTTP_PARSE_URL_ERROR); + return false; +} diff --git a/src/modules/http/src/restJson.c b/src/modules/http/src/restJson.c new file mode 100644 index 000000000000..d03045ad1a0a --- /dev/null +++ b/src/modules/http/src/restJson.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
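/*
 * For illustration (a sketch, not part of the module itself): restProcessRequest() above routes on the
 * URL path segments. Assuming REST_ACTION_URL_POS / REST_USER_URL_POS / REST_PASS_URL_POS index the
 * segments after the "rest" (or "restful") module name, the handled forms are roughly:
 *
 *   /rest/login/{user}/{pass}   -> returns the fixed token string
 *   /rest/sql                   -> restEncodeSqlMethod    (timestamps rendered as formatted strings)
 *   /rest/sqlt                  -> restEncodeSqlTimeMethod (timestamps rendered as raw int64 values)
 *
 * "use ..." statements are rejected with HTTP_NO_EXEC_USEDB, so tables must be database-qualified in the SQL.
 */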
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include "httpJson.h" +#include "restHandle.h" +#include "restJson.h" + +void restBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // data row array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + httpJsonItemToken(jsonBuf); + httpJsonInt(jsonBuf, affect_rows); + + // data row array end + httpJsonToken(jsonBuf, JsonArrEnd); + + cmd->numOfRows = affect_rows; +} + +void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + TAOS_FIELD *fields = taos_fetch_fields(result); + int num_fields = taos_num_fields(result); + + httpInitJsonBuf(jsonBuf, pContext); + httpWriteJsonBufHead(jsonBuf); + + // object begin + httpJsonToken(jsonBuf, JsonObjStt); + + // status, and data + httpJsonItemToken(jsonBuf); + httpJsonPair(jsonBuf, REST_JSON_STATUS, REST_JSON_STATUS_LEN, REST_JSON_SUCCESS, REST_JSON_SUCCESS_LEN); + + // head begin + httpJsonItemToken(jsonBuf); + httpJsonPairHead(jsonBuf, REST_JSON_HEAD, REST_JSON_HEAD_LEN); + // head array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + if (num_fields == 0) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + } else { + for (int i = 0; i < num_fields; ++i) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int)strlen(fields[i].name)); + } + } + + // head array end + httpJsonToken(jsonBuf, JsonArrEnd); + + // data begin + httpJsonItemToken(jsonBuf); + httpJsonPairHead(jsonBuf, REST_JSON_DATA, REST_JSON_DATA_LEN); + // data array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); +} + +bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return false; + + cmd->numOfRows += numOfRows; + + int num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + for (int i = 0; i < numOfRows; ++i) { + TAOS_ROW row = taos_fetch_row(result); + + // data row array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + for (int i = 0; i < num_fields; i++) { + httpJsonItemToken(jsonBuf); + + if (row[i] == NULL) { + httpJsonString(jsonBuf, "NULL", 4); + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + httpJsonInt(jsonBuf, *((int8_t *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + httpJsonInt(jsonBuf, *((int16_t *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + httpJsonInt(jsonBuf, *((int32_t *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + httpJsonInt64(jsonBuf, *((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + httpJsonFloat(jsonBuf, *((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + httpJsonDouble(jsonBuf, *((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + httpJsonStringForTransMean(jsonBuf, row[i], fields[i].bytes); + 
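/*
 * For illustration (a sketch, not part of the module itself): every row encoded in this loop is appended
 * to the "data" array opened by restStartSqlJson() above, and restStopSqlJson() later closes it and adds
 * the row count. Assuming the REST_JSON_* constants expand to the lowercase key names, a two-column
 * result produces a body shaped like (values made up):
 *
 *   {"status":"succ","head":["ts","value"],"data":[["2019-07-01 00:00:00.000",12.3]],"rows":1}
 */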
break; + case TSDB_DATA_TYPE_TIMESTAMP: + // httpJsonInt64(jsonBuf, *((int64_t *)row[i])); + // httpTimeToString(*((int64_t *)row[i]), timeBuf, 32); + // httpJsonString(jsonBuf, timeBuf, strlen(timeBuf)); + httpJsonTimestamp(jsonBuf, *((int64_t *)row[i])); + break; + default: + break; + } + } + + // data row array end + httpJsonToken(jsonBuf, JsonArrEnd); + } + + httpTrace("context:%p, fd:%d, ip:%s, user:%s, total rows:%lld retrieved", pContext, pContext->fd, pContext->ipstr, + pContext->user, cmd->numOfRows); + return true; +} + +bool restBuildSqlTimeJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int numOfRows) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return false; + + cmd->numOfRows += numOfRows; + + int num_fields = taos_num_fields(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + for (int i = 0; i < numOfRows; ++i) { + TAOS_ROW row = taos_fetch_row(result); + + // data row array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); + + for (int i = 0; i < num_fields; i++) { + httpJsonItemToken(jsonBuf); + + if (row[i] == NULL) { + httpJsonString(jsonBuf, "NULL", 4); + continue; + } + + switch (fields[i].type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + httpJsonInt(jsonBuf, *((int8_t *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + httpJsonInt(jsonBuf, *((int16_t *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + httpJsonInt(jsonBuf, *((int32_t *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + httpJsonInt64(jsonBuf, *((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + httpJsonFloat(jsonBuf, *((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + httpJsonDouble(jsonBuf, *((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + httpJsonStringForTransMean(jsonBuf, row[i], fields[i].bytes); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + httpJsonInt64(jsonBuf, *((int64_t *)row[i])); + // httpTimeToString(*((int64_t *)row[i]), timeBuf, 32); + // httpJsonString(jsonBuf, timeBuf, strlen(timeBuf)); + // httpJsonTimestamp(jsonBuf, *((int64_t *)row[i])); + break; + default: + break; + } + } + + // data row array end + httpJsonToken(jsonBuf, JsonArrEnd); + } + + return true; +} + +void restStopSqlJson(HttpContext *pContext, HttpSqlCmd *cmd) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // data array end + httpJsonToken(jsonBuf, JsonArrEnd); + + // rows + httpJsonItemToken(jsonBuf); + httpJsonPairHead(jsonBuf, REST_JSON_ROWS, REST_JSON_ROWS_LEN); + httpJsonInt64(jsonBuf, cmd->numOfRows); + + // object end + httpJsonToken(jsonBuf, JsonObjEnd); + + httpWriteJsonBufEnd(jsonBuf); +} \ No newline at end of file diff --git a/src/modules/http/src/tgHandle.c b/src/modules/http/src/tgHandle.c new file mode 100644 index 000000000000..7b292cd54164 --- /dev/null +++ b/src/modules/http/src/tgHandle.c @@ -0,0 +1,1280 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "tgHandle.h" +#include "shash.h" +#include "taosmsg.h" +#include "tgJson.h" +#include "tsdb.h" + +#define TG_MAX_SORT_TAG_SIZE 20 + +static HttpDecodeMethod tgDecodeMethod = {"telegraf", tgProcessRquest}; +static HttpEncodeMethod tgQueryMethod = {tgStartQueryJson, tgStopQueryJson, NULL, + tgBuildSqlAffectRowsJson, tgInitQueryJson, tgCleanQueryJson, + tgCheckFinished, tgSetNextCmd}; + +typedef struct { + char *tagName; + char *tagAlias; + char *tagType; +} STgTag; + +typedef struct { + char *fieldName; + char *fieldAlias; + char *fieldType; +} STgField; + +typedef struct { + char * stName; + char * stAlias; + STgTag * tags; + STgField *fields; + int16_t tagNum; + int16_t fieldNum; + char * createSTableStr; +} STgStable; + +/* + * hash of STgStable + */ +static void *tgSchemaHash = NULL; + +/* + * formats like + * behind the midline is an alias of field/tag/stable + { + "metrics": [{ + "name": "win_cpu-cpu", + "fields": { + "Percent_DPC_Time": "float", + "Percent_Idle_Time": "float", + "Percent_Interrupt_Time": "float", + "Percent_Privileged_Time": "float", + "Percent_Processor_Time": "float", + "Percent_User_Time": "float" + }, + "tags": { + "host": "binary(32)", + "instance": "binary(32)", + "objectname": "binary(32)" + } + }, + { + "fields": { + "Bytes_Received_persec-f1": "float", + "Bytes_Sent_persec-f2": "float", + "Packets_Outbound_Discarded-f3": "float", + "Packets_Outbound_Errors-f4": "float", + "Packets_Received_Discarded-f5": "float", + "Packets_Received_Errors": "float", + "Packets_Received_persec": "float", + "Packets_Sent_persec": "float" + }, + "name": "win_net", + "tags": { + "host": "binary(32)", + "instance": "binary(32)", + "objectname": "binary(32)" + }, + "timestamp": 1536219762000 + }] + } + */ +void tgReadSchemaMetric(cJSON *metric) { + STgStable stable = {0}; + int createSTableStrLen = 100; + bool parsedOk = true; + + // stable name + cJSON *name = cJSON_GetObjectItem(metric, "name"); + if (name == NULL) { + parsedOk = false; + goto ParseEnd; + } + if (name->type != cJSON_String) { + parsedOk = false; + goto ParseEnd; + } + if (name->valuestring == NULL) { + parsedOk = false; + goto ParseEnd; + } + int nameLen = (int)strlen(name->valuestring); + if (nameLen == 0) { + parsedOk = false; + goto ParseEnd; + } + int aliasPos = -1; + for (int i = 0; i < nameLen - 1; ++i) { + if (name->valuestring[i] == '-') { + aliasPos = i; + break; + } + } + if (aliasPos == -1) { + stable.stName = stable.stAlias = calloc((size_t)nameLen + 1, 1); + strcpy(stable.stName, name->valuestring); + createSTableStrLen += nameLen; + } else { + stable.stName = calloc((size_t)aliasPos + 1, 1); + stable.stAlias = calloc((size_t)(nameLen - aliasPos), 1); + strncpy(stable.stName, name->valuestring, (size_t)aliasPos); + strncpy(stable.stAlias, name->valuestring + aliasPos + 1, (size_t)(nameLen - aliasPos - 1)); + createSTableStrLen += (nameLen - aliasPos); + } + + // tags + cJSON *tags = cJSON_GetObjectItem(metric, "tags"); + if (tags == NULL) { + parsedOk = false; + goto ParseEnd; + } + int tagsSize = cJSON_GetArraySize(tags); + if (tagsSize <= 0 || tagsSize > TSDB_MAX_TAGS) { + parsedOk = false; + goto ParseEnd; + } + stable.tags = calloc(sizeof(STgTag), (size_t)tagsSize); + stable.tagNum = (int16_t)tagsSize; + for (int i = 0; i < tagsSize; i++) { + STgTag *tagSchema = &stable.tags[i]; + cJSON * tag = cJSON_GetArrayItem(tags, i); + if (tag == NULL) { + parsedOk = false; + goto ParseEnd; + } + if (tag->string == NULL) { + parsedOk = false; + goto ParseEnd; + } + int nameLen = 
(int)strlen(tag->string); + if (nameLen == 0 || nameLen > TSDB_METER_NAME_LEN) { + parsedOk = false; + goto ParseEnd; + } + int aliasPos = -1; + for (int i = 0; i < nameLen - 1; ++i) { + if (tag->string[i] == '-') { + aliasPos = i; + break; + } + } + if (aliasPos == -1) { + tagSchema->tagName = calloc((size_t)nameLen + 1, 1); + strcpy(tagSchema->tagName, tag->string); + tagSchema->tagAlias = calloc((size_t)nameLen + 3, 1); + strcpy(tagSchema->tagAlias, "t_"); + strcpy(tagSchema->tagAlias + 2, tag->string); + createSTableStrLen += (nameLen + 4); + } else { + tagSchema->tagName = calloc((size_t)aliasPos + 1, 1); + tagSchema->tagAlias = calloc((size_t)(nameLen - aliasPos), 1); + strncpy(tagSchema->tagName, tag->string, (size_t)aliasPos); + strncpy(tagSchema->tagAlias, tag->string + aliasPos + 1, (size_t)(nameLen - aliasPos - 1)); + createSTableStrLen += (nameLen - aliasPos + 2); + } + + if (tag->type == cJSON_String) { + if (tag->valuestring == NULL) { + parsedOk = false; + goto ParseEnd; + } + int valueLen = (int)strlen(tag->valuestring); + if (valueLen == 0) { + parsedOk = false; + goto ParseEnd; + } + if (strcasecmp(tag->valuestring, "timestamp") == 0 || strcasecmp(tag->valuestring, "bool") == 0 || + strcasecmp(tag->valuestring, "tinyint") == 0 || strcasecmp(tag->valuestring, "smallint") == 0 || + strcasecmp(tag->valuestring, "int") == 0 || strcasecmp(tag->valuestring, "bigint") == 0 || + strcasecmp(tag->valuestring, "float") == 0 || strcasecmp(tag->valuestring, "double") == 0 || + strncasecmp(tag->valuestring, "binary", 6) == 0 || strncasecmp(tag->valuestring, "nchar", 5) == 0) { + tagSchema->tagType = calloc((size_t)valueLen + 1, 1); + strcpy(tagSchema->tagType, tag->valuestring); + createSTableStrLen += valueLen; + } else { + tagSchema->tagType = calloc(11, 1); + strcpy(tagSchema->tagType, "binary(32)"); + createSTableStrLen += 12; + } + } else if (tag->type == cJSON_False || tag->type == cJSON_True) { + tagSchema->tagType = calloc(8, 1); + strcpy(tagSchema->tagType, "tinyint"); + createSTableStrLen += 10; + } else { + tagSchema->tagType = calloc(7, 1); + strcpy(tagSchema->tagType, "bigint"); + createSTableStrLen += 9; + } + } + + // fields + cJSON *fields = cJSON_GetObjectItem(metric, "fields"); + if (fields == NULL) { + parsedOk = false; + goto ParseEnd; + } + int fieldSize = cJSON_GetArraySize(fields); + if (fieldSize <= 0 || fieldSize > TSDB_MAX_COLUMNS) { + parsedOk = false; + goto ParseEnd; + } + stable.fields = calloc(sizeof(STgField), (size_t)fieldSize); + stable.fieldNum = (int16_t)fieldSize; + for (int i = 0; i < fieldSize; i++) { + STgField *fieldSchema = &stable.fields[i]; + cJSON * field = cJSON_GetArrayItem(fields, i); + if (field == NULL) { + parsedOk = false; + goto ParseEnd; + } + if (field->string == NULL) { + parsedOk = false; + goto ParseEnd; + } + int nameLen = (int)strlen(field->string); + if (nameLen == 0 || nameLen > TSDB_METER_NAME_LEN) { + parsedOk = false; + goto ParseEnd; + } + int aliasPos = -1; + for (int i = 0; i < nameLen - 1; ++i) { + if (field->string[i] == '-') { + aliasPos = i; + break; + } + } + if (aliasPos == -1) { + fieldSchema->fieldName = calloc((size_t)nameLen + 1, 1); + strcpy(fieldSchema->fieldName, field->string); + fieldSchema->fieldAlias = calloc((size_t)nameLen + 3, 1); + strcpy(fieldSchema->fieldAlias, "f_"); + strcpy(fieldSchema->fieldAlias + 2, field->string); + createSTableStrLen += (nameLen + 4); + } else { + fieldSchema->fieldName = calloc((size_t)aliasPos + 1, 1); + fieldSchema->fieldAlias = calloc((size_t)(nameLen - aliasPos), 1); 
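/*
 * For illustration (a sketch, not part of the module itself): with the "win_cpu-cpu" metric from the
 * schema comment above, the name splits on the first '-' into stName "win_cpu" and stAlias "cpu", while
 * tags and fields without a '-' keep their name and get a "t_" / "f_" prefixed alias. The createSTableStr
 * template assembled below (the %s.%s placeholders are filled with the database and stable name later)
 * would look roughly like:
 *
 *   create table if not exists %s.%s(ts timestamp,f_Percent_DPC_Time float, ... ,f_Percent_User_Time float)
 *     tags(t_host binary(32),t_instance binary(32),t_objectname binary(32))
 */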
+ strncpy(fieldSchema->fieldName, field->string, (size_t)aliasPos); + strncpy(fieldSchema->fieldAlias, field->string + aliasPos + 1, (size_t)(nameLen - aliasPos - 1)); + createSTableStrLen += (nameLen - aliasPos + 2); + } + + if (field->type == cJSON_String) { + if (field->valuestring == NULL) { + parsedOk = false; + goto ParseEnd; + } + int valueLen = (int)strlen(field->valuestring); + if (valueLen == 0) { + parsedOk = false; + goto ParseEnd; + } + if (strcasecmp(field->valuestring, "timestamp") == 0 || strcasecmp(field->valuestring, "bool") == 0 || + strcasecmp(field->valuestring, "tinyint") == 0 || strcasecmp(field->valuestring, "smallint") == 0 || + strcasecmp(field->valuestring, "int") == 0 || strcasecmp(field->valuestring, "bigint") == 0 || + strcasecmp(field->valuestring, "float") == 0 || strcasecmp(field->valuestring, "double") == 0 || + strncasecmp(field->valuestring, "binary", 6) == 0 || strncasecmp(field->valuestring, "nchar", 5) == 0) { + fieldSchema->fieldType = calloc((size_t)valueLen + 1, 1); + strcpy(fieldSchema->fieldType, field->valuestring); + createSTableStrLen += valueLen; + } else { + fieldSchema->fieldType = calloc(11, 1); + strcpy(fieldSchema->fieldType, "binary(32)"); + createSTableStrLen += 12; + } + } else if (field->type == cJSON_False || field->type == cJSON_True) { + fieldSchema->fieldType = calloc(8, 1); + strcpy(fieldSchema->fieldType, "tinyint"); + createSTableStrLen += 10; + } else { + fieldSchema->fieldType = calloc(7, 1); + strcpy(fieldSchema->fieldType, "double"); + createSTableStrLen += 9; + } + } + + // assembling create stable sql + stable.createSTableStr = calloc((size_t)createSTableStrLen, 1); + strcpy(stable.createSTableStr, "create table if not exists %s.%s(ts timestamp"); + int len = (int)strlen(stable.createSTableStr); + for (int i = 0; i < stable.fieldNum; ++i) { + STgField *field = &stable.fields[i]; + len += sprintf(stable.createSTableStr + len, ",%s %s", field->fieldAlias, field->fieldType); + } + len += sprintf(stable.createSTableStr + len, ") tags("); + for (int i = 0; i < stable.tagNum; ++i) { + STgTag *tag = &stable.tags[i]; + if (i == 0) { + len += sprintf(stable.createSTableStr + len, "%s %s", tag->tagAlias, tag->tagType); + } else { + len += sprintf(stable.createSTableStr + len, ",%s %s", tag->tagAlias, tag->tagType); + } + } + sprintf(stable.createSTableStr + len, ")"); + +ParseEnd: + + if (parsedOk) { + taosAddStrHash(tgSchemaHash, stable.stName, (char *)(&stable)); + } else { + if (stable.stName != NULL) { + free(stable.stName); + } + if (stable.stAlias != NULL) { + free(stable.stName); + } + for (int i = 0; i < stable.tagNum; ++i) { + if (stable.tags[i].tagName != NULL) { + free(stable.tags[i].tagName); + } + if (stable.tags[i].tagAlias != NULL) { + free(stable.tags[i].tagAlias); + } + if (stable.tags[i].tagType != NULL) { + free(stable.tags[i].tagType); + } + } + if (stable.tags != NULL) { + free(stable.tags); + } + for (int i = 0; i < stable.fieldNum; ++i) { + if (stable.fields[i].fieldName != NULL) { + free(stable.fields[i].fieldName); + } + if (stable.fields[i].fieldAlias != NULL) { + free(stable.fields[i].fieldAlias); + } + if (stable.fields[i].fieldType != NULL) { + free(stable.fields[i].fieldType); + } + } + if (stable.fields != NULL) { + free(stable.fields); + } + if (stable.createSTableStr != NULL) { + free(stable.createSTableStr); + } + } +} + +int tgReadSchema(const char *fileName) { + FILE *fp = fopen(fileName, "r"); + if (fp == NULL) { + httpPrint("failed to open telegraf schema config file:%s, use default schema", 
fileName); + return -1; + } + httpPrint("open telegraf schema config file:%s successfully", fileName); + + fseek(fp, 0, SEEK_END); + size_t contentSize = (size_t)ftell(fp); + rewind(fp); + char * content = (char *)calloc(contentSize * sizeof(char) + 1, 1); + size_t result = fread(content, 1, contentSize, fp); + if (result != contentSize) { + httpError("failed to read telegraf schema config file:%s, use default schema", fileName); + return -1; + } + + cJSON *root = cJSON_Parse(content); + if (root == NULL) { + httpError("failed to parse telegraf schema config file:%s, invalid json format", fileName); + return -1; + } + + cJSON *metrics = cJSON_GetObjectItem(root, "metrics"); + if (metrics != NULL) { + int size = cJSON_GetArraySize(metrics); + if (size <= 0) { + httpError("failed to parse telegraf schema config file:%s, metrics size is 0", fileName); + cJSON_Delete(root); + return -1; + } + + for (int i = 0; i < size; i++) { + cJSON *metric = cJSON_GetArrayItem(metrics, i); + if (metric != NULL) { + tgReadSchemaMetric(metric); + } + } + } else { + tgReadSchemaMetric(root); + } + + cJSON_Delete(root); + free(content); + fclose(fp); + + httpPrint("parse telegraf schema config file:%s successfully, stable schema size:%d", fileName); + return 0; +} + +/* + * in case of file not exist + * we use default schema: + * such as: + * diskio + * mem + * processes + * procstat + * system + * disk + * swap + * kernel + */ +void tgInitHandle(HttpServer *pServer) { + tgSchemaHash = taosInitStrHash(100, sizeof(STgStable), taosHashStringStep1); + char fileName[256] = {0}; + sprintf(fileName, "%s/taos.telegraf.cfg", configDir); + if (tgReadSchema(fileName) == -1) { + taosCleanUpStrHash(tgSchemaHash); + tgSchemaHash = NULL; + } + httpAddMethod(pServer, &tgDecodeMethod); +} + +bool tgGetUserFromUrl(HttpContext *pContext) { + HttpParser *pParser = &pContext->pThread->parser; + if (pParser->path[TG_USER_URL_POS].len > TSDB_USER_LEN - 1 || pParser->path[TG_USER_URL_POS].len <= 0) { + return false; + } + + strcpy(pContext->user, pParser->path[TG_USER_URL_POS].pos); + return true; +} + +bool tgGetPassFromUrl(HttpContext *pContext) { + HttpParser *pParser = &pContext->pThread->parser; + if (pParser->path[TG_PASS_URL_POS].len > TSDB_PASSWORD_LEN - 1 || pParser->path[TG_PASS_URL_POS].len <= 0) { + return false; + } + + strcpy(pContext->pass, pParser->path[TG_PASS_URL_POS].pos); + return true; +} + +char *tgGetDbFromUrl(HttpContext *pContext) { + HttpParser *pParser = &pContext->pThread->parser; + if (pParser->path[TG_DB_URL_POS].len <= 0) { + httpSendErrorResp(pContext, HTTP_TG_DB_NOT_INPUT); + return NULL; + } + + if (pParser->path[TG_DB_URL_POS].len >= TSDB_DB_NAME_LEN) { + httpSendErrorResp(pContext, HTTP_TG_DB_TOO_LONG); + return NULL; + } + + return pParser->path[TG_DB_URL_POS].pos; +} + +/* + * parse single metric + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + } + */ +bool tgProcessSingleMetricUseDefaultSchema(HttpContext *pContext, cJSON *metric, char *db) { + // metric name + cJSON *name = cJSON_GetObjectItem(metric, "name"); + if (name == NULL) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_NULL); + return false; + } + if (name->type != cJSON_String) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_TYPE); + return false; + } + if (name->valuestring == NULL) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL); + return false; + } + int nameLen = (int)strlen(name->valuestring); + if 
(nameLen == 0) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL); + return false; + } + if (nameLen >= TSDB_METER_NAME_LEN - 7) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_LONG); + return false; + } + + // timestamp + cJSON *timestamp = cJSON_GetObjectItem(metric, "timestamp"); + if (timestamp == NULL) { + httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_NULL); + return false; + } + if (timestamp->type != cJSON_Number) { + httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_TYPE); + return false; + } + if (timestamp->valueint <= 0) { + httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_VAL_NULL); + return false; + } + + // tags + cJSON *tags = cJSON_GetObjectItem(metric, "tags"); + if (tags == NULL) { + httpSendErrorResp(pContext, HTTP_TG_TAGS_NULL); + return false; + } + + int tagsSize = cJSON_GetArraySize(tags); + if (tagsSize <= 0) { + httpSendErrorResp(pContext, HTTP_TG_TAGS_SIZE_0); + return false; + } + + if (tagsSize > TG_MAX_SORT_TAG_SIZE) { + httpSendErrorResp(pContext, HTTP_TG_TAGS_SIZE_LONG); + return false; + } + + cJSON *host = NULL; + + for (int i = 0; i < tagsSize; i++) { + cJSON *tag = cJSON_GetArrayItem(tags, i); + if (tag == NULL) { + httpSendErrorResp(pContext, HTTP_TG_TAG_NULL); + return false; + } + if (tag->string == NULL || strlen(tag->string) == 0) { + httpSendErrorResp(pContext, HTTP_TG_TAG_NAME_NULL); + return false; + } + + /* + * tag size may be larget than TSDB_COL_NAME_LEN + * we keep the first TSDB_COL_NAME_LEN bytes + */ + if (0) { + if (strlen(tag->string) >= TSDB_COL_NAME_LEN) { + httpSendErrorResp(pContext, HTTP_TG_TAG_NAME_SIZE); + return false; + } + } + + if (tag->type != cJSON_Number && tag->type != cJSON_String) { + httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_TYPE); + return false; + } + + if (tag->type == cJSON_String) { + if (tag->valuestring == NULL || strlen(tag->valuestring) == 0) { + httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_NULL); + return false; + } + } + + if (strcasecmp(tag->string, "host") == 0) { + host = tag; + } + } + + if (host == NULL) { + httpSendErrorResp(pContext, HTTP_TG_TABLE_NULL); + return false; + } + + if (host->type != cJSON_String) { + httpSendErrorResp(pContext, HTTP_TG_HOST_NOT_STRING); + return false; + } + + if (strlen(host->valuestring) >= TSDB_METER_NAME_LEN) { + httpSendErrorResp(pContext, HTTP_TG_TABLE_SIZE); + return false; + } + + // fields + cJSON *fields = cJSON_GetObjectItem(metric, "fields"); + if (fields == NULL) { + httpSendErrorResp(pContext, HTTP_TG_FIELDS_NULL); + return false; + } + + int fieldsSize = cJSON_GetArraySize(fields); + if (fieldsSize <= 0) { + httpSendErrorResp(pContext, HTTP_TG_FIELDS_SIZE_0); + return false; + } + + if (fieldsSize > (TSDB_MAX_COLUMNS - TSDB_MAX_TAGS - 1)) { + httpSendErrorResp(pContext, HTTP_TG_FIELDS_SIZE_LONG); + return false; + } + + for (int i = 0; i < fieldsSize; i++) { + cJSON *field = cJSON_GetArrayItem(fields, i); + if (field == NULL) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_NULL); + return false; + } + if (field->string == NULL || strlen(field->string) == 0) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_NAME_NULL); + return false; + } + /* + * tag size may be larget than TSDB_COL_NAME_LEN + * we keep the first TSDB_COL_NAME_LEN bytes + */ + if (0) { + if (strlen(field->string) >= TSDB_COL_NAME_LEN) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_NAME_SIZE); + return false; + } + } + if (field->type != cJSON_Number && field->type != cJSON_String) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_TYPE); + return false; + } + if (field->type == 
cJSON_String) { + if (field->valuestring == NULL || strlen(field->valuestring) == 0) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_NULL); + return false; + } + } + } + + // assembling cmds + HttpSqlCmd *stable_cmd = httpNewSqlCmd(pContext); + if (stable_cmd == NULL) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + return false; + } + stable_cmd->cmdType = HTTP_CMD_TYPE_CREATE_STBALE; + stable_cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_NO_RETURN; + + HttpSqlCmd *table_cmd = httpNewSqlCmd(pContext); + if (table_cmd == NULL) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + return false; + } + table_cmd->cmdType = HTTP_CMD_TYPE_INSERT; + + // order by tag name + cJSON *orderedTags[TG_MAX_SORT_TAG_SIZE] = {0}; + int orderTagsLen = 0; + for (int i = 0; i < tagsSize; ++i) { + cJSON *tag = cJSON_GetArrayItem(tags, i); + orderedTags[orderTagsLen++] = tag; + for (int j = orderTagsLen - 1; j >= 1; --j) { + cJSON *tag1 = orderedTags[j]; + cJSON *tag2 = orderedTags[j - 1]; + if (strcasecmp(tag1->string, "host") == 0 || strcmp(tag1->string, tag2->string) < 0) { + orderedTags[j] = tag2; + orderedTags[j - 1] = tag1; + } + } + } + orderTagsLen = orderTagsLen < TSDB_MAX_TAGS ? orderTagsLen : TSDB_MAX_TAGS; + + table_cmd->tagNum = stable_cmd->tagNum = (int8_t)orderTagsLen; + table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%ld", timestamp->valueint); + + // stable name + char *stname = name->valuestring; + table_cmd->metric = stable_cmd->metric = httpAddToSqlCmdBuffer(pContext, "%s", stname); + table_cmd->stable = stable_cmd->stable = httpAddToSqlCmdBuffer(pContext, "%s", stname); + //httpAddToSqlCmdBuffer(pContext, "%s_%d_%d", stname, fieldsSize, orderTagsLen); + table_cmd->stable = stable_cmd->stable = + httpShrinkTableName(pContext, table_cmd->stable, httpGetCmdsString(pContext, table_cmd->stable)); + + // stable tag for detail + for (int i = 0; i < orderTagsLen; ++i) { + cJSON *tag = orderedTags[i]; + stable_cmd->tagNames[i] = table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tag->string); + + if (tag->type == cJSON_String) + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring); + else if (tag->type == cJSON_Number) + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "%ld", tag->valueint); + else if (tag->type == cJSON_True) + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "1"); + else if (tag->type == cJSON_False) + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "0"); + else + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "NULL"); + } + + // table name + table_cmd->table = stable_cmd->table = httpAddToSqlCmdBufferNoTerminal(pContext, "%s_%s", stname, host->valuestring); + //httpAddToSqlCmdBufferNoTerminal(pContext, "%s_%d_%d_%s", stname, fieldsSize, orderTagsLen, host->valuestring); + + for (int i = 0; i < orderTagsLen; ++i) { + cJSON *tag = orderedTags[i]; + if (tag == host) continue; + if (tag->type == cJSON_String) + httpAddToSqlCmdBufferNoTerminal(pContext, "_%s", tag->valuestring); + else if (tag->type == cJSON_Number) + httpAddToSqlCmdBufferNoTerminal(pContext, "_%ld", tag->valueint); + else if (tag->type == cJSON_False) + httpAddToSqlCmdBufferNoTerminal(pContext, "_0"); + else if (tag->type == cJSON_True) + httpAddToSqlCmdBufferNoTerminal(pContext, "_1"); + else + httpAddToSqlCmdBufferNoTerminal(pContext, "_n"); + } + 
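/*
 * For illustration (a sketch, not part of the module itself): for the single-metric example in the
 * comment above this function ("docker" metric with tag host="raynor"), the default-schema path emits a
 * pair of commands into the multi-cmd buffer, roughly ("db" stands for the database taken from the URL):
 *
 *   create table if not exists db.docker(ts timestamp,f_field_1 double, ... ,f_n_images double) tags(t_host binary(32))
 *   import into db.docker_raynor using db.docker tags('raynor') values(1458229140,30.000000, ... ,660.000000)
 *
 * The first command carries HTTP_CMD_RETURN_TYPE_NO_RETURN, so presumably only the import's result is
 * reported back to the caller.
 */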
httpAddToSqlCmdBuffer(pContext, ""); + + table_cmd->table = stable_cmd->table = + httpShrinkTableName(pContext, table_cmd->table, httpGetCmdsString(pContext, table_cmd->table)); + + // assembling create stable sql + stable_cmd->sql = httpAddToSqlCmdBufferNoTerminal(pContext, "create table if not exists %s.%s(ts timestamp", db, + httpGetCmdsString(pContext, table_cmd->stable)); + for (int i = 0; i < fieldsSize; ++i) { + cJSON *field = cJSON_GetArrayItem(fields, i); + char * field_type = "double"; + if (field->type == cJSON_String) + field_type = "binary(32)"; + else if (field->type == cJSON_False || field->type == cJSON_True) + field_type = "tinyint"; + else { + } + + char *field_name = field->string; + httpAddToSqlCmdBufferNoTerminal(pContext, ",f_%s %s", field_name, field_type); + } + httpAddToSqlCmdBufferNoTerminal(pContext, ") tags("); + + for (int i = 0; i < orderTagsLen; ++i) { + cJSON *tag = orderedTags[i]; + char * tag_type = "bigint"; + if (tag->type == cJSON_String) + tag_type = "binary(32)"; + else if (tag->type == cJSON_False || tag->type == cJSON_True) + tag_type = "tinyint"; + else { + } + + char *tag_name = tag->string; + if (i != orderTagsLen - 1) + httpAddToSqlCmdBufferNoTerminal(pContext, "t_%s %s,", tag_name, tag_type); + else + httpAddToSqlCmdBuffer(pContext, "t_%s %s)", tag_name, tag_type); + } + + // assembling insert sql + table_cmd->sql = httpAddToSqlCmdBufferNoTerminal(pContext, "import into %s.%s using %s.%s tags(", db, + httpGetCmdsString(pContext, table_cmd->table), db, + httpGetCmdsString(pContext, table_cmd->stable)); + for (int i = 0; i < orderTagsLen; ++i) { + cJSON *tag = orderedTags[i]; + if (i != orderTagsLen - 1) { + if (tag->type == cJSON_Number) + httpAddToSqlCmdBufferNoTerminal(pContext, "%ld,", tag->valueint); + else if (tag->type == cJSON_String) + httpAddToSqlCmdBufferNoTerminal(pContext, "'%s',", tag->valuestring); + else if (tag->type == cJSON_False) + httpAddToSqlCmdBufferNoTerminal(pContext, "0,"); + else if (tag->type == cJSON_True) + httpAddToSqlCmdBufferNoTerminal(pContext, "1,"); + else { + httpAddToSqlCmdBufferNoTerminal(pContext, "NULL,"); + } + } else { + if (tag->type == cJSON_Number) + httpAddToSqlCmdBufferNoTerminal(pContext, "%ld)", tag->valueint); + else if (tag->type == cJSON_String) + httpAddToSqlCmdBufferNoTerminal(pContext, "'%s')", tag->valuestring); + else if (tag->type == cJSON_False) + httpAddToSqlCmdBufferNoTerminal(pContext, "0)"); + else if (tag->type == cJSON_True) + httpAddToSqlCmdBufferNoTerminal(pContext, "1)"); + else { + httpAddToSqlCmdBufferNoTerminal(pContext, "NULL)"); + } + } + } + + httpAddToSqlCmdBufferNoTerminal(pContext, " values(%ld,", timestamp->valueint); + for (int i = 0; i < fieldsSize; ++i) { + cJSON *field = cJSON_GetArrayItem(fields, i); + if (i != fieldsSize - 1) { + if (field->type == cJSON_Number) + httpAddToSqlCmdBufferNoTerminal(pContext, "%lf,", field->valuedouble); + else if (field->type == cJSON_String) + httpAddToSqlCmdBufferNoTerminal(pContext, "'%s',", field->valuestring); + else if (field->type == cJSON_False) + httpAddToSqlCmdBufferNoTerminal(pContext, "0,"); + else if (field->type == cJSON_True) + httpAddToSqlCmdBufferNoTerminal(pContext, "1,"); + else { + httpAddToSqlCmdBufferNoTerminal(pContext, "NULL,"); + } + } else { + if (field->type == cJSON_Number) + httpAddToSqlCmdBuffer(pContext, "%lf)", field->valuedouble); + else if (field->type == cJSON_String) + httpAddToSqlCmdBuffer(pContext, "'%s')", field->valuestring); + else if (field->type == cJSON_False) + 
httpAddToSqlCmdBuffer(pContext, "0)"); + else if (field->type == cJSON_True) + httpAddToSqlCmdBuffer(pContext, "1)"); + else { + httpAddToSqlCmdBuffer(pContext, "NULL)"); + } + } + } + + return true; +} + +bool tgProcessSingleMetricUseConfigSchema(HttpContext *pContext, cJSON *metric, char *db) { + // metric name + cJSON *name = cJSON_GetObjectItem(metric, "name"); + if (name == NULL) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_NULL); + return false; + } + if (name->type != cJSON_String) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_TYPE); + return false; + } + if (name->valuestring == NULL) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL); + return false; + } + int nameLen = (int)strlen(name->valuestring); + if (nameLen == 0) { + httpSendErrorResp(pContext, HTTP_TG_METRIC_NAME_NULL); + return false; + } + STgStable *stable = (STgStable *)taosGetStrHashData(tgSchemaHash, name->valuestring); + if (stable == NULL) { + httpSendErrorResp(pContext, HTTP_TG_STABLE_NOT_EXIST); + return false; + } + + // timestamp + cJSON *timestamp = cJSON_GetObjectItem(metric, "timestamp"); + if (timestamp == NULL) { + httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_NULL); + return false; + } + if (timestamp->type != cJSON_Number) { + httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_TYPE); + return false; + } + if (timestamp->valueint <= 0) { + httpSendErrorResp(pContext, HTTP_TG_TIMESTAMP_VAL_NULL); + return false; + } + + // tags + cJSON *tags = cJSON_GetObjectItem(metric, "tags"); + if (tags == NULL) { + httpSendErrorResp(pContext, HTTP_TG_TAGS_NULL); + return false; + } + int tagsSize = cJSON_GetArraySize(tags); + if (tagsSize <= 0) { + httpSendErrorResp(pContext, HTTP_TG_TAGS_SIZE_0); + return false; + } + for (int i = 0; i < tagsSize; i++) { + cJSON *tag = cJSON_GetArrayItem(tags, i); + if (tag == NULL) { + httpSendErrorResp(pContext, HTTP_TG_TAG_NULL); + return false; + } + if (tag->string == NULL || strlen(tag->string) == 0) { + httpSendErrorResp(pContext, HTTP_TG_TAG_NAME_NULL); + return false; + } + if (tag->type != cJSON_Number && tag->type != cJSON_String) { + httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_TYPE); + return false; + } + if (tag->type == cJSON_String) { + if (tag->valuestring == NULL || strlen(tag->valuestring) == 0) { + httpSendErrorResp(pContext, HTTP_TG_TAG_VALUE_NULL); + return false; + } + } + } + + // fields + cJSON *fields = cJSON_GetObjectItem(metric, "fields"); + if (fields == NULL) { + httpSendErrorResp(pContext, HTTP_TG_FIELDS_NULL); + return false; + } + int fieldsSize = cJSON_GetArraySize(fields); + if (fieldsSize <= 0) { + httpSendErrorResp(pContext, HTTP_TG_FIELDS_SIZE_0); + return false; + } + for (int i = 0; i < fieldsSize; i++) { + cJSON *field = cJSON_GetArrayItem(fields, i); + if (field == NULL) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_NULL); + return false; + } + if (field->string == NULL || strlen(field->string) == 0) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_NAME_NULL); + return false; + } + if (field->type != cJSON_Number && field->type != cJSON_String) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_TYPE); + return false; + } + if (field->type == cJSON_String) { + if (field->valuestring == NULL || strlen(field->valuestring) == 0) { + httpSendErrorResp(pContext, HTTP_TG_FIELD_VALUE_NULL); + return false; + } + } + } + + // assembling cmds + HttpSqlCmd *stable_cmd = httpNewSqlCmd(pContext); + if (stable_cmd == NULL) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + return false; + } + stable_cmd->cmdType = HTTP_CMD_TYPE_CREATE_STBALE; + 
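/*
 * For illustration (a sketch, not part of the module itself): this config-schema path mirrors the
 * default-schema path above, except that column aliases and types come from the STgStable entry loaded
 * from taos.telegraf.cfg (looked up in tgSchemaHash by metric name) rather than being inferred from the
 * JSON value types. Tags or fields declared in the schema but missing from the payload are written as
 * NULL, so a metric with alias "cpu" and only the host tag present might expand to (values made up):
 *
 *   import into db.cpu_raynor using db.cpu tags('raynor',NULL) values(1458229140,1.5,NULL)
 */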
stable_cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_NO_RETURN; + + HttpSqlCmd *table_cmd = httpNewSqlCmd(pContext); + if (table_cmd == NULL) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + return false; + } + table_cmd->cmdType = HTTP_CMD_TYPE_INSERT; + table_cmd->tagNum = stable_cmd->tagNum = (int8_t)stable->tagNum; + table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%ld", timestamp->valueint); + + // stable name + char *stname = stable->stAlias; + + table_cmd->metric = stable_cmd->metric = httpAddToSqlCmdBuffer(pContext, "%s", stname); + table_cmd->stable = stable_cmd->stable = httpAddToSqlCmdBuffer(pContext, "%s", stname); + table_cmd->stable = stable_cmd->stable = + httpShrinkTableName(pContext, table_cmd->stable, httpGetCmdsString(pContext, table_cmd->stable)); + + // stable tag for detail + for (int ts = 0; ts < stable->tagNum; ++ts) { + STgTag *tagSchema = &stable->tags[ts]; + bool tagParsed = false; + for (int tt = 0; tt < tagsSize; ++tt) { + cJSON *tag = cJSON_GetArrayItem(tags, tt); + if (strcasecmp(tag->string, tagSchema->tagName) != 0) { + continue; + } + + stable_cmd->tagNames[ts] = table_cmd->tagNames[ts] = httpAddToSqlCmdBuffer(pContext, tagSchema->tagAlias); + + if (tag->type == cJSON_String) { + stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = + httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring); + tagParsed = true; + } else if (tag->type == cJSON_Number) { + stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "%ld", tag->valueint); + tagParsed = true; + } else if (tag->type == cJSON_True) { + stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "1"); + tagParsed = true; + } else if (tag->type == cJSON_False) { + stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "0"); + tagParsed = true; + } else { + } + + break; + } + + if (!tagParsed) { + stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBuffer(pContext, "NULL"); + } + } + + // table name + table_cmd->table = stable_cmd->table = httpAddToSqlCmdBufferNoTerminal(pContext, "%s", stname); + for (int ts = 0; ts < stable->tagNum; ++ts) { + STgTag *tagSchema = &stable->tags[ts]; + bool tagParsed = false; + for (int tt = 0; tt < tagsSize; ++tt) { + cJSON *tag = cJSON_GetArrayItem(tags, tt); + if (strcasecmp(tag->string, tagSchema->tagName) != 0) { + continue; + } + + if (tag->type == cJSON_String) { + httpAddToSqlCmdBufferNoTerminal(pContext, "_%s", tag->valuestring); + tagParsed = true; + } else if (tag->type == cJSON_Number) { + httpAddToSqlCmdBufferNoTerminal(pContext, "_%ld", tag->valueint); + tagParsed = true; + } else if (tag->type == cJSON_True) { + httpAddToSqlCmdBufferNoTerminal(pContext, "_1"); + tagParsed = true; + } else if (tag->type == cJSON_False) { + httpAddToSqlCmdBufferNoTerminal(pContext, "_0"); + tagParsed = true; + } else { + } + + break; + } + + if (!tagParsed) { + stable_cmd->tagValues[ts] = table_cmd->tagValues[ts] = httpAddToSqlCmdBufferNoTerminal(pContext, "_n"); + } + } + httpAddToSqlCmdBuffer(pContext, ""); + table_cmd->table = stable_cmd->table = + httpShrinkTableName(pContext, table_cmd->table, httpGetCmdsString(pContext, table_cmd->table)); + + // assembling create stable sql + stable_cmd->sql = + httpAddToSqlCmdBuffer(pContext, stable->createSTableStr, db, httpGetCmdsString(pContext, table_cmd->stable)); + + // assembling insert sql + table_cmd->sql = httpAddToSqlCmdBufferNoTerminal(pContext, "import into %s.%s using %s.%s 
tags(", db, + httpGetCmdsString(pContext, table_cmd->table), db, + httpGetCmdsString(pContext, table_cmd->stable)); + for (int ts = 0; ts < stable->tagNum; ++ts) { + if (ts != 0) { + httpAddToSqlCmdBufferNoTerminal(pContext, ",%s", httpGetCmdsString(pContext, stable_cmd->tagValues[ts])); + } else { + httpAddToSqlCmdBufferNoTerminal(pContext, "%s", httpGetCmdsString(pContext, stable_cmd->tagValues[ts])); + } + } + + httpAddToSqlCmdBufferNoTerminal(pContext, ") values(%ld", timestamp->valueint); + + // stable tag for detail + for (int fs = 0; fs < stable->fieldNum; ++fs) { + STgField *fieldSchema = &stable->fields[fs]; + bool fieldParsed = false; + for (int ff = 0; ff < fieldsSize; ++ff) { + cJSON *field = cJSON_GetArrayItem(fields, ff); + if (strcasecmp(field->string, fieldSchema->fieldName) != 0) { + continue; + } + + if (field->type == cJSON_String) { + httpAddToSqlCmdBufferNoTerminal(pContext, ",\"%s\"", field->valuestring); + fieldParsed = true; + } else if (field->type == cJSON_Number) { + httpAddToSqlCmdBufferNoTerminal(pContext, ",%lf", field->valuedouble); + fieldParsed = true; + } else if (field->type == cJSON_True) { + httpAddToSqlCmdBufferNoTerminal(pContext, ",1"); + fieldParsed = true; + } else if (field->type == cJSON_False) { + httpAddToSqlCmdBufferNoTerminal(pContext, ",0"); + fieldParsed = true; + } else { + } + + break; + } + + if (!fieldParsed) { + httpAddToSqlCmdBufferNoTerminal(pContext, ",NULL"); + } + } + httpAddToSqlCmdBuffer(pContext, ")"); + + return true; +} + +/** + * request from telegraf 1.7.0 + * single request: + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + } + * multiple request: + { + "metrics": [ + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + }, + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + },orderTagsLen + "timestamp": 1458229140 + } + ] + } + */ +bool tgProcessQueryRequest(HttpContext *pContext, char *db) { + httpTrace("context:%p, fd:%d, ip:%s, process telegraf query msg", pContext, pContext->fd, pContext->ipstr); + + HttpParser *pParser = &pContext->pThread->parser; + char * filter = pParser->data.pos; + if (filter == NULL) { + httpSendErrorResp(pContext, HTTP_NO_MSG_INPUT); + return false; + } + + cJSON *root = cJSON_Parse(filter); + if (root == NULL) { + httpSendErrorResp(pContext, HTTP_TG_INVALID_JSON); + return false; + } + + cJSON *metrics = cJSON_GetObjectItem(root, "metrics"); + if (metrics != NULL) { + int size = cJSON_GetArraySize(metrics); + httpTrace("context:%p, fd:%d, ip:%s, multiple metrics:%d at one time", pContext, pContext->fd, pContext->ipstr, + size); + if (size <= 0) { + httpSendErrorResp(pContext, HTTP_TG_METRICS_NULL); + cJSON_Delete(root); + return false; + } + + int cmdSize = size * 2 + 1; + if (cmdSize > HTTP_MAX_CMD_SIZE) { + httpSendErrorResp(pContext, HTTP_TG_METRICS_SIZE); + cJSON_Delete(root); + return false; + } + + if (!httpMallocMultiCmds(pContext, cmdSize, HTTP_BUFFER_SIZE)) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + cJSON_Delete(root); + return false; + } + + HttpSqlCmd *cmd = httpNewSqlCmd(pContext); + if (cmd == NULL) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + cJSON_Delete(root); + return false; + } + cmd->cmdType = 
HTTP_CMD_TYPE_CREATE_DB; + cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_NO_RETURN; + cmd->sql = httpAddToSqlCmdBuffer(pContext, "create database if not exists %s", db); + + for (int i = 0; i < size; i++) { + cJSON *metric = cJSON_GetArrayItem(metrics, i); + if (metric != NULL) { + if (tgSchemaHash != NULL) { + if (!tgProcessSingleMetricUseConfigSchema(pContext, metric, db)) { + cJSON_Delete(root); + return false; + } + } else { + if (!tgProcessSingleMetricUseDefaultSchema(pContext, metric, db)) { + cJSON_Delete(root); + return false; + } + } + } + } + } else { + httpTrace("context:%p, fd:%d, ip:%s, single metric", pContext, pContext->fd, pContext->ipstr); + + if (!httpMallocMultiCmds(pContext, 3, HTTP_BUFFER_SIZE)) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + cJSON_Delete(root); + return false; + } + + HttpSqlCmd *cmd = httpNewSqlCmd(pContext); + if (cmd == NULL) { + httpSendErrorResp(pContext, HTTP_NO_ENOUGH_MEMORY); + cJSON_Delete(root); + return false; + } + cmd->cmdType = HTTP_CMD_TYPE_CREATE_DB; + cmd->cmdReturnType = HTTP_CMD_RETURN_TYPE_NO_RETURN; + cmd->sql = httpAddToSqlCmdBuffer(pContext, "create database if not exists %s", db); + + if (tgSchemaHash != NULL) { + if (!tgProcessSingleMetricUseConfigSchema(pContext, root, db)) { + cJSON_Delete(root); + return false; + } + } else { + if (!tgProcessSingleMetricUseDefaultSchema(pContext, root, db)) { + cJSON_Delete(root); + return false; + } + } + } + + cJSON_Delete(root); + + pContext->reqType = HTTP_REQTYPE_MULTI_SQL; + pContext->encodeMethod = &tgQueryMethod; + pContext->multiCmds->pos = 2; + + return true; +} + +bool tgProcessRquest(struct HttpContext *pContext) { + tgGetUserFromUrl(pContext); + tgGetPassFromUrl(pContext); + + if (strlen(pContext->user) == 0 || strlen(pContext->pass) == 0) { + httpSendErrorResp(pContext, HTTP_PARSE_USR_ERROR); + return false; + } + + char *db = tgGetDbFromUrl(pContext); + if (db == NULL) { + return false; + } + + return tgProcessQueryRequest(pContext, db); +} diff --git a/src/modules/http/src/tgJson.c b/src/modules/http/src/tgJson.c new file mode 100644 index 000000000000..f706a56bd90d --- /dev/null +++ b/src/modules/http/src/tgJson.c @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include "httpJson.h" +#include "httpResp.h" +#include "taosmsg.h" +#include "tgHandle.h" +#include "tgJson.h" + +void tgInitQueryJson(HttpContext *pContext) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + httpInitJsonBuf(jsonBuf, pContext); + httpWriteJsonBufHead(jsonBuf); + + // array begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonObjStt); + + httpJsonPairHead(jsonBuf, "metrics", 7); + + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonArrStt); +} + +void tgCleanQueryJson(HttpContext *pContext) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // array end + httpJsonToken(jsonBuf, JsonArrEnd); + httpJsonToken(jsonBuf, JsonObjEnd); + + httpWriteJsonBufEnd(jsonBuf); +} + +void tgStartQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // object begin + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonObjStt); + + // data + httpJsonItemToken(jsonBuf); + httpJsonPair(jsonBuf, "metric", 6, httpGetCmdsString(pContext, cmd->stable), + (int)strlen(httpGetCmdsString(pContext, cmd->metric))); + + httpJsonItemToken(jsonBuf); + httpJsonPair(jsonBuf, "stable", 6, httpGetCmdsString(pContext, cmd->stable), + (int)strlen(httpGetCmdsString(pContext, cmd->stable))); + + httpJsonItemToken(jsonBuf); + httpJsonPair(jsonBuf, "table", 5, httpGetCmdsString(pContext, cmd->table), + (int)strlen(httpGetCmdsString(pContext, cmd->table))); + + httpJsonItemToken(jsonBuf); + httpJsonPair(jsonBuf, "timestamp", 9, httpGetCmdsString(pContext, cmd->timestamp), + (int)strlen(httpGetCmdsString(pContext, cmd->timestamp))); // hack way +} + +void tgStopQueryJson(HttpContext *pContext, HttpSqlCmd *cmd) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // data + httpJsonItemToken(jsonBuf); + httpJsonPairStatus(jsonBuf, cmd->code); + + // object end + httpJsonToken(jsonBuf, JsonObjEnd); +} + +void tgBuildSqlAffectRowsJson(HttpContext *pContext, HttpSqlCmd *cmd, int affect_rows) { + JsonBuf *jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) return; + + // data + httpJsonPairIntVal(jsonBuf, "affected_rows", 13, affect_rows); +} + +bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + httpTrace("context:%p, fd:%d, ip:%s, check telegraf command, code:%d, state:%d, type:%d, rettype:%d, tags:%d", + pContext, pContext->fd, pContext->ipstr, code, cmd->cmdState, cmd->cmdType, cmd->cmdReturnType, cmd->tagNum); + + if (cmd->cmdType == HTTP_CMD_TYPE_INSERT) { + if (cmd->cmdState == HTTP_CMD_STATE_NOT_RUN_YET) { + if (code == TSDB_CODE_DB_NOT_SELECTED || code == TSDB_CODE_INVALID_DB) { + cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED; + if (multiCmds->cmds[0].cmdState == HTTP_CMD_STATE_NOT_RUN_YET) { + multiCmds->pos = (int16_t)-1; + httpTrace("context:%p, fd:%d, ip:%s, import failed, try create database", pContext, pContext->fd, + pContext->ipstr); + return false; + } + } else if (code == TSDB_CODE_INVALID_TABLE) { + cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED; + if (multiCmds->cmds[multiCmds->pos - 1].cmdState == HTTP_CMD_STATE_NOT_RUN_YET) { + multiCmds->pos = (int16_t)(multiCmds->pos - 2); + httpTrace("context:%p, fd:%d, ip:%s, import failed, try create stable", pContext, pContext->fd, + pContext->ipstr); + return false; + } + } else { + } + } else { + } + } else if 
(cmd->cmdType == HTTP_CMD_TYPE_CREATE_DB) { + cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED; + httpTrace("context:%p, fd:%d, ip:%s, code:%d, create database failed", pContext, pContext->fd, pContext->ipstr, + code); + } else if (cmd->cmdType == HTTP_CMD_TYPE_CREATE_STBALE) { + cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED; + httpTrace("context:%p, fd:%d, ip:%s, code:%d, create stable failed", pContext, pContext->fd, pContext->ipstr, code); + } else { + } + + return true; +} + +void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) { + HttpSqlCmds *multiCmds = pContext->multiCmds; + httpTrace("context:%p, fd:%d, ip:%s, get telegraf next command, pos:%d, code:%d, state:%d, type:%d, rettype:%d, tags:%d", + pContext, pContext->fd, pContext->ipstr, multiCmds->pos, code, cmd->cmdState, cmd->cmdType, + cmd->cmdReturnType, cmd->tagNum); + + if (cmd->cmdType == HTTP_CMD_TYPE_INSERT) { + multiCmds->pos = (int16_t)(multiCmds->pos + 2); + } else if (cmd->cmdType == HTTP_CMD_TYPE_CREATE_DB) { + multiCmds->pos++; + } else if (cmd->cmdType == HTTP_CMD_TYPE_CREATE_STBALE) { + multiCmds->pos++; + } else { + multiCmds->pos++; + } +} \ No newline at end of file diff --git a/src/modules/monitor/CMakeLists.txt b/src/modules/monitor/CMakeLists.txt new file mode 100755 index 000000000000..ef6657ec9541 --- /dev/null +++ b/src/modules/monitor/CMakeLists.txt @@ -0,0 +1,9 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc ../http/inc ../../../deps/inc ../../client/inc) + +ADD_LIBRARY(monitor ${SRC}) +TARGET_LINK_LIBRARIES(monitor taos_static trpc tutil z) diff --git a/src/modules/monitor/inc/monitor.h b/src/modules/monitor/inc/monitor.h new file mode 100644 index 000000000000..954f4898b195 --- /dev/null +++ b/src/modules/monitor/inc/monitor.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef __MONITOR_H__ +#define __MONITOR_H__ + +#include "tglobalcfg.h" +#include "tlog.h" + +#define monitorError(...) \ + if (monitorDebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR MON ", 255, __VA_ARGS__); \ + } +#define monitorWarn(...) \ + if (monitorDebugFlag & DEBUG_WARN) { \ + tprintf("WARN MON ", monitorDebugFlag, __VA_ARGS__); \ + } +#define monitorTrace(...) \ + if (monitorDebugFlag & DEBUG_TRACE) { \ + tprintf("MON ", monitorDebugFlag, __VA_ARGS__); \ + } +#define monitorPrint(...) \ + { tprintf("MON ", 255, __VA_ARGS__); } + +#define monitorLError(...) taosLogError(__VA_ARGS__) monitorError(__VA_ARGS__) +#define monitorLWarn(...) taosLogWarn(__VA_ARGS__) monitorWarn(__VA_ARGS__) +#define monitorLPrint(...) taosLogPrint(__VA_ARGS__) monitorPrint(__VA_ARGS__) + +#endif \ No newline at end of file diff --git a/src/modules/monitor/inc/monitorSystem.h b/src/modules/monitor/inc/monitorSystem.h new file mode 100644 index 000000000000..339f2795b1fb --- /dev/null +++ b/src/modules/monitor/inc/monitorSystem.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef __MONITOR_SYSTEM_H__ +#define __MONITOR_SYSTEM_H__ + +#include + +int monitorInitSystem(); +int monitorStartSystem(); +void monitorStopSystem(); +void monitorCleanUpSystem(); + +typedef struct { + int selectReqNum; + int insertReqNum; + int httpReqNum; +} SCountInfo; + +extern void (*monitorCountReqFp)(SCountInfo *info); + +#endif \ No newline at end of file diff --git a/src/modules/monitor/src/monitorSystem.c b/src/modules/monitor/src/monitorSystem.c new file mode 100644 index 000000000000..0f1b2089073b --- /dev/null +++ b/src/modules/monitor/src/monitorSystem.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "monitor.h" +#include +#include +#include +#include "monitorSystem.h" +#include "tsclient.h" +#include "tsdb.h" +#include "tsystem.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +#define SQL_LENGTH 1024 +#define LOG_LEN_STR 80 +#define IP_LEN_STR 15 + +typedef enum { + MONITOR_CMD_CREATE_DB, + MONITOR_CMD_CREATE_TB_LOG, + MONITOR_CMD_CREATE_MT_DN, + MONITOR_CMD_CREATE_TB_DN, + MONITOR_CMD_CREATE_TB_SLOWQUERY, + MONITOR_CMD_MAX +} MonitorCommand; + +typedef enum { + MONITOR_STATE_UN_INIT, + MONITOR_STATE_INITIALIZING, + MONITOR_STATE_INITIALIZED, + MONITOR_STATE_STOPPED +} MonitorState; + +typedef struct { + void * conn; + void * timer; + char privateIpStr[TSDB_IPv4ADDR_LEN]; + int8_t cmdIndex; + int8_t state; + char sql[SQL_LENGTH]; + void * initTimer; +} MonitorConn; + +MonitorConn *monitor = NULL; + +TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, int port, void (*fp)(void *, TAOS_RES *, int), + void *param, void **taos); +void monitorInitConn(void *para, void *unused); +void monitorInitConnCb(void *param, TAOS_RES *result, int code); +void monitorInitDatabase(); +void monitorInitDatabaseCb(void *param, TAOS_RES *result, int code); +void monitorStartTimer(); +void monitorSaveSystemInfo(); +void monitorSaveLog(int level, const char *const format, ...); +void (*monitorCountReqFp)(SCountInfo *info) = NULL; +void monitorExecuteSQL(char *sql); + +int monitorInitSystem() { + monitor = (MonitorConn *)malloc(sizeof(MonitorConn)); + memset(monitor, 0, sizeof(MonitorConn)); + return 0; +} + +int monitorStartSystem() { + taosTmrReset(monitorInitConn, 10, NULL, tscTmr, &monitor->initTimer); + return 0; +} + +void monitorStartSystemRetry() { + if (monitor->initTimer != NULL) { + taosTmrReset(monitorInitConn, 3000, NULL, tscTmr, 
&monitor->initTimer); + } +} + +void monitorInitConn(void *para, void *unused) { + monitorPrint("starting to initialize monitor service .."); + monitor->state = MONITOR_STATE_INITIALIZING; + + if (monitor->privateIpStr[0] == 0) { + strcpy(monitor->privateIpStr, tsInternalIp); + for (int i = 0; i < TSDB_IPv4ADDR_LEN; ++i) { + if (monitor->privateIpStr[i] == '.') { + monitor->privateIpStr[i] = '_'; + } + } + } + + if (monitor->conn == NULL) { + taos_connect_a(NULL, "monitor", tsInternalPass, "", 0, monitorInitConnCb, monitor, &(monitor->conn)); + } else { + monitorInitDatabase(); + } +} + +void monitorInitConnCb(void *param, TAOS_RES *result, int code) { + if (code < 0) { + monitorError("monitor:%p, connect to taosd failed, code:%d", monitor->conn, code); + taos_close(monitor->conn); + monitor->conn = NULL; + monitor->state = MONITOR_STATE_UN_INIT; + monitorStartSystemRetry(); + return; + } + + monitorTrace("monitor:%p, connect to taosd success, code:%d", monitor->conn, code); + monitorInitDatabase(); +} + +void dnodeBuildMonitorSql(char *sql, int cmd) { + memset(sql, 0, SQL_LENGTH); + + if (cmd == MONITOR_CMD_CREATE_DB) { + snprintf(sql, SQL_LENGTH, + "create database if not exists %s replica 1 days 10 keep 30 rows 1024 cache 2048 " + "ablocks 2 tblocks 32 tables 32 precision us", + tsMonitorDbName); + } else if (cmd == MONITOR_CMD_CREATE_MT_DN) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.dn(ts timestamp" + ", cpu_taosd float, cpu_system float, cpu_cores int" + ", mem_taosd float, mem_system float, mem_total int" + ", disk_used float, disk_total int" + ", band_speed float" + ", io_read float, io_write float" + ", req_http int, req_select int, req_insert int" + ") tags (ipaddr binary(%d))", + tsMonitorDbName, IP_LEN_STR + 1); + } else if (cmd == MONITOR_CMD_CREATE_TB_DN) { + snprintf(sql, SQL_LENGTH, "create table if not exists %s.dn_%s using %s.dn tags('%s')", tsMonitorDbName, + monitor->privateIpStr, tsMonitorDbName, tsInternalIp); + } else if (cmd == MONITOR_CMD_CREATE_TB_SLOWQUERY) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.slowquery(ts timestamp, username " + "binary(%d), created_time timestamp, time bigint, sql binary(%d))", + tsMonitorDbName, TSDB_METER_ID_LEN, TSDB_SHOW_SQL_LEN); + } else if (cmd == MONITOR_CMD_CREATE_TB_LOG) { + snprintf(sql, SQL_LENGTH, + "create table if not exists %s.log(ts timestamp, level tinyint, " + "content binary(%d), ipaddr binary(%d))", + tsMonitorDbName, LOG_LEN_STR, IP_LEN_STR); + } + + sql[SQL_LENGTH] = 0; +} + +void monitorInitDatabase() { + if (monitor->cmdIndex < MONITOR_CMD_MAX) { + dnodeBuildMonitorSql(monitor->sql, monitor->cmdIndex); + taos_query_a(monitor->conn, monitor->sql, monitorInitDatabaseCb, NULL); + } else { + monitor->state = MONITOR_STATE_INITIALIZED; + monitorPrint("monitor service init success"); + + monitorStartTimer(); + } +} + +void monitorInitDatabaseCb(void *param, TAOS_RES *result, int code) { + if (-code == TSDB_CODE_TABLE_ALREADY_EXIST || -code == TSDB_CODE_DB_ALREADY_EXIST || code >= 0) { + monitorTrace("monitor:%p, sql success, code:%d, %s", monitor->conn, code, monitor->sql); + if (monitor->cmdIndex == MONITOR_CMD_CREATE_TB_LOG) { + taosLogFp = monitorSaveLog; + taosLogSqlFp = monitorExecuteSQL; + monitorLPrint("dnode:%s is started", tsInternalIp); + } + monitor->cmdIndex++; + monitorInitDatabase(); + } else { + monitorError("monitor:%p, sql failed, code:%d, %s", monitor->conn, code, monitor->sql); + monitor->state = MONITOR_STATE_UN_INIT; + monitorStartSystemRetry(); + } +} + +void 
monitorStopSystem() { + if (monitor == NULL) { + return; + } + + monitorLPrint("dnode:%s is stopped", tsInternalIp); + monitor->state = MONITOR_STATE_STOPPED; + taosLogFp = NULL; + if (monitor->initTimer != NULL) { + taosTmrStopA(&(monitor->initTimer)); + } + if (monitor->timer != NULL) { + taosTmrStopA(&(monitor->timer)); + } +} + +void monitorCleanUpSystem() { + monitorPrint("monitor service cleanup"); + monitorStopSystem(); +} + +void monitorStartTimer() { + taosTmrReset(monitorSaveSystemInfo, tsMonitorInterval * 1000, NULL, tscTmr, &monitor->timer); +} + +void dnodeMontiorInsertSysCallback(void *param, TAOS_RES *result, int code) { + if (code < 0) { + monitorError("monitor:%p, save system info failed, code:%d %s", monitor->conn, code, monitor->sql); + } else if (code == 0) { + monitorError("monitor:%p, save system info failed, affect rows:%d %s", monitor->conn, code, monitor->sql); + } else { + monitorTrace("monitor:%p, save system info success, code:%d %s", monitor->conn, code, monitor->sql); + } +} + +void dnodeMontiorInsertLogCallback(void *param, TAOS_RES *result, int code) { + if (code < 0) { + monitorError("monitor:%p, save log failed, code:%d", monitor->conn, code); + } else if (code == 0) { + monitorError("monitor:%p, save log failed, affect rows:%d", monitor->conn, code); + } else { + monitorTrace("monitor:%p, save log info success, code:%d", monitor->conn, code); + } +} + +// unit is MB +int monitorBuildMemorySql(char *sql) { + float sysMemoryUsedMB = 0; + bool suc = taosGetSysMemory(&sysMemoryUsedMB); + if (!suc) { + monitorError("monitor:%p, get sys memory info failed.", monitor->conn); + } + + float procMemoryUsedMB = 0; + suc = taosGetProcMemory(&procMemoryUsedMB); + if (!suc) { + monitorError("monitor:%p, get proc memory info failed.", monitor->conn); + } + + return sprintf(sql, ", %f, %f, %d", procMemoryUsedMB, sysMemoryUsedMB, tsTotalMemoryMB); +} + +// unit is % +int monitorBuildCpuSql(char *sql) { + float sysCpuUsage = 0, procCpuUsage = 0; + bool suc = taosGetCpuUsage(&sysCpuUsage, &procCpuUsage); + if (!suc) { + monitorError("monitor:%p, get cpu usage failed.", monitor->conn); + } + + if (sysCpuUsage <= procCpuUsage) { + sysCpuUsage = procCpuUsage + (float)0.1; + } + + return sprintf(sql, ", %f, %f, %d", procCpuUsage, sysCpuUsage, tsNumOfCores); +} + +// unit is GB +int monitorBuildDiskSql(char *sql) { + float diskUsedGB = 0; + bool suc = taosGetDisk(&diskUsedGB); + if (!suc) { + monitorError("monitor:%p, get disk info failed.", monitor->conn); + } + + return sprintf(sql, ", %f, %d", diskUsedGB, tsTotalDiskGB); +} + +// unit is Kb +int monitorBuildBandSql(char *sql) { + float bandSpeedKb = 0; + bool suc = taosGetBandSpeed(&bandSpeedKb); + if (!suc) { + monitorError("monitor:%p, get bandwidth speed failed.", monitor->conn); + } + + return sprintf(sql, ", %f", bandSpeedKb); +} + +int monitorBuildReqSql(char *sql) { + SCountInfo info; + info.httpReqNum = info.insertReqNum = info.selectReqNum = 0; + (*monitorCountReqFp)(&info); + + return sprintf(sql, ", %d, %d, %d)", info.httpReqNum, info.selectReqNum, info.insertReqNum); +} + +int monitorBuildIoSql(char *sql) { + float readKB = 0, writeKB = 0; + bool suc = taosGetProcIO(&readKB, &writeKB); + if (!suc) { + monitorError("monitor:%p, get io info failed.", monitor->conn); + } + + return sprintf(sql, ", %f, %f", readKB, writeKB); +} + +void monitorSaveSystemInfo() { + if (monitor->state != MONITOR_STATE_INITIALIZED) { + return; + } + + if (monitorCountReqFp == NULL) { + return; + } + + int64_t ts = taosGetTimestampUs(); + 
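  /* The row is assembled piecewise: each monitorBuild*Sql helper below appends its
     columns with sprintf and returns the number of bytes written, so pos always
     points at the end of the statement. The column order (cpu, memory, disk,
     bandwidth, io, request counters) mirrors the dn super table created in
     dnodeBuildMonitorSql, and monitorBuildReqSql supplies the closing ')'.
     Purely as an illustration of the resulting shape (db name and IP are examples):
         insert into log.dn_192_168_0_1 values(<ts in us>, <cpu>, <mem>, <disk>, <band>, <io>, <req>) */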
char * sql = monitor->sql; + int pos = snprintf(sql, SQL_LENGTH, "insert into %s.dn_%s values(%ld", tsMonitorDbName, monitor->privateIpStr, ts); + + pos += monitorBuildCpuSql(sql + pos); + pos += monitorBuildMemorySql(sql + pos); + pos += monitorBuildDiskSql(sql + pos); + pos += monitorBuildBandSql(sql + pos); + pos += monitorBuildIoSql(sql + pos); + pos += monitorBuildReqSql(sql + pos); + + monitorTrace("monitor:%p, save system info, sql:%s", monitor->conn, sql); + taos_query_a(monitor->conn, sql, dnodeMontiorInsertSysCallback, "log"); + + if (monitor->timer != NULL && monitor->state != MONITOR_STATE_STOPPED) { + monitorStartTimer(); + } +} + +void monitorSaveLog(int level, const char *const format, ...) { + va_list argpointer; + char sql[SQL_LENGTH] = {0}; + int max_length = SQL_LENGTH - 30; + + if (monitor->state != MONITOR_STATE_INITIALIZED) { + return; + } + + int len = snprintf(sql, (size_t)max_length, "import into %s.log values(%ld, %d,'", tsMonitorDbName, + taosGetTimestampUs(), level); + + va_start(argpointer, format); + len += vsnprintf(sql + len, (size_t)(max_length - len), format, argpointer); + va_end(argpointer); + if (len > max_length) len = max_length; + + len += sprintf(sql + len, "', '%s')", tsInternalIp); + sql[len++] = 0; + + monitorTrace("monitor:%p, save log, sql: %s", monitor->conn, sql); + taos_query_a(monitor->conn, sql, dnodeMontiorInsertLogCallback, "log"); +} + +void monitorExecuteSQL(char *sql) { + monitorTrace("monitor:%p, execute sql: %s", monitor->conn, sql); + taos_query_a(monitor->conn, sql, NULL, NULL); +} diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt new file mode 100755 index 000000000000..bfcdced71d5e --- /dev/null +++ b/src/rpc/CMakeLists.txt @@ -0,0 +1,9 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc) + +ADD_LIBRARY(trpc ${SRC}) +TARGET_LINK_LIBRARIES(trpc tutil) diff --git a/src/rpc/inc/thaship.h b/src/rpc/inc/thaship.h new file mode 100644 index 000000000000..262673af6299 --- /dev/null +++ b/src/rpc/inc/thaship.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _rpc_hash_ip_header_ +#define _rpc_hash_ip_header_ + +void *taosOpenIpHash(int maxSessions); +void taosCloseIpHash(void *handle); +void *taosAddIpHash(void *handle, void *pData, uint32_t ip, short port); +void taosDeleteIpHash(void *handle, uint32_t ip, short port); +void *taosGetIpHash(void *handle, uint32_t ip, short port); + +#endif diff --git a/src/rpc/inc/ttcpclient.h b/src/rpc/inc/ttcpclient.h new file mode 100644 index 000000000000..8c2131f1f6bb --- /dev/null +++ b/src/rpc/inc/ttcpclient.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _taos_tcp_client_header_ +#define _taos_tcp_client_header_ + +#include "tsdb.h" + +void *taosInitTcpClient(char *ip, short port, char *label, int num, void *fp, void *shandle); +void taosCleanUpTcpClient(void *chandle); +void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, short port); +void taosCloseTcpClientConnection(void *chandle); +int taosSendTcpClientData(uint32_t ip, short port, char *data, int len, void *chandle); + +#endif diff --git a/src/rpc/inc/ttcpserver.h b/src/rpc/inc/ttcpserver.h new file mode 100644 index 000000000000..3e3feb46918f --- /dev/null +++ b/src/rpc/inc/ttcpserver.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _taos_tcp_server_header_ +#define _taos_tcp_server_header_ + +#include "tsdb.h" + +void *taosInitTcpServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle); +void taosCleanUpTcpServer(void *param); +void taosCloseTcpServerConnection(void *param); +int taosSendTcpServerData(uint32_t ip, short port, char *data, int len, void *chandle); + +#endif diff --git a/src/rpc/inc/tudp.h b/src/rpc/inc/tudp.h new file mode 100644 index 000000000000..c90e21f510dd --- /dev/null +++ b/src/rpc/inc/tudp.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _taos_udp_header_ +#define _taos_udp_header_ + +#include "tsdb.h" + +void *taosInitUdpServer(char *ip, short port, char *label, int, void *fp, void *shandle); +void *taosInitUdpClient(char *ip, short port, char *label, int, void *fp, void *shandle); +void taosCleanUpUdpConnection(void *handle); +int taosSendUdpData(uint32_t ip, short port, char *data, int dataLen, void *chandle); +void *taosOpenUdpConnection(void *shandle, void *thandle, char *ip, short port); + +void taosFreeMsgHdr(void *hdr); +int taosMsgHdrSize(void *hdr); +void taosSendMsgHdr(void *hdr, int fd); +void taosInitMsgHdr(void **hdr, void *dest, int maxPkts); +void taosSetMsgHdrData(void *hdr, char *data, int dataLen); + +#endif diff --git a/src/rpc/src/thaship.c b/src/rpc/src/thaship.c new file mode 100644 index 000000000000..8940dba60b3c --- /dev/null +++ b/src/rpc/src/thaship.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "stdint.h" +#include "tlog.h" +#include "tmempool.h" + +typedef struct _ip_hash_t { + uint32_t ip; + short port; + int hash; + struct _ip_hash_t *prev; + struct _ip_hash_t *next; + void * data; +} SIpHash; + +typedef struct { + SIpHash **ipHashList; + mpool_h ipHashMemPool; + int maxSessions; +} SHashObj; + +int taosHashIp(void *handle, uint32_t ip, short port) { + SHashObj *pObj = (SHashObj *)handle; + int hash = 0; + + hash = (int)(ip >> 16); + hash += (unsigned short)(ip & 0xFFFF); + hash += (unsigned short)port; + + hash = hash % pObj->maxSessions; + + return hash; +} + +void *taosAddIpHash(void *handle, void *data, uint32_t ip, short port) { + int hash; + SIpHash * pNode; + SHashObj *pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + hash = taosHashIp(pObj, ip, port); + pNode = (SIpHash *)taosMemPoolMalloc(pObj->ipHashMemPool); + pNode->ip = ip; + pNode->port = port; + pNode->data = data; + pNode->prev = 0; + pNode->next = pObj->ipHashList[hash]; + pNode->hash = hash; + + if (pObj->ipHashList[hash] != 0) (pObj->ipHashList[hash])->prev = pNode; + pObj->ipHashList[hash] = pNode; + + return pObj; +} + +void taosDeleteIpHash(void *handle, uint32_t ip, short port) { + int hash; + SIpHash * pNode; + SHashObj *pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + hash = taosHashIp(pObj, ip, port); + + pNode = pObj->ipHashList[hash]; + while (pNode) { + if (pNode->ip == ip && pNode->port == port) break; + + pNode = pNode->next; + } + + if (pNode) { + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pObj->ipHashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + taosMemPoolFree(pObj->ipHashMemPool, (char *)pNode); + } +} + +void *taosGetIpHash(void *handle, uint32_t ip, short port) { + int hash; + SIpHash * pNode; + SHashObj *pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) 
return NULL; + + hash = taosHashIp(pObj, ip, port); + pNode = pObj->ipHashList[hash]; + + while (pNode) { + if (pNode->ip == ip && pNode->port == port) { + break; + } + pNode = pNode->next; + } + + if (pNode) { + return pNode->data; + } + return NULL; +} + +void *taosOpenIpHash(int maxSessions) { + SIpHash **ipHashList; + mpool_h ipHashMemPool; + SHashObj *pObj; + + ipHashMemPool = taosMemPoolInit(maxSessions, sizeof(SIpHash)); + if (ipHashMemPool == 0) return NULL; + + ipHashList = calloc(sizeof(SIpHash *), (size_t)maxSessions); + if (ipHashList == 0) { + taosMemPoolCleanUp(ipHashMemPool); + return NULL; + } + + pObj = malloc(sizeof(SHashObj)); + if (pObj == NULL) { + taosMemPoolCleanUp(ipHashMemPool); + free(ipHashList); + return NULL; + } + + pObj->maxSessions = maxSessions; + pObj->ipHashMemPool = ipHashMemPool; + pObj->ipHashList = ipHashList; + + return pObj; +} + +void taosCloseIpHash(void *handle) { + SHashObj *pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + if (pObj->ipHashMemPool) taosMemPoolCleanUp(pObj->ipHashMemPool); + + if (pObj->ipHashList) free(pObj->ipHashList); + + memset(pObj, 0, sizeof(SHashObj)); + free(pObj); +} diff --git a/src/rpc/src/tmsghdr.c b/src/rpc/src/tmsghdr.c new file mode 100644 index 000000000000..960d1fc5f6a5 --- /dev/null +++ b/src/rpc/src/tmsghdr.c @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +void taosFreeMsgHdr(void *hdr) { + struct msghdr *msgHdr = (struct msghdr *)hdr; + free(msgHdr->msg_iov); +} + +int taosMsgHdrSize(void *hdr) { + struct msghdr *msgHdr = (struct msghdr *)hdr; + return (int)msgHdr->msg_iovlen; +} + +void taosSendMsgHdr(void *hdr, int fd) { + struct msghdr *msgHdr = (struct msghdr *)hdr; + sendmsg(fd, msgHdr, 0); + msgHdr->msg_iovlen = 0; +} + +void taosInitMsgHdr(void **hdr, void *dest, int maxPkts) { + struct msghdr *msgHdr = (struct msghdr *)malloc(sizeof(struct msghdr)); + memset(msgHdr, 0, sizeof(struct msghdr)); + *hdr = msgHdr; + struct sockaddr_in *destAdd = (struct sockaddr_in *)dest; + + msgHdr->msg_name = destAdd; + msgHdr->msg_namelen = sizeof(struct sockaddr_in); + int size = (int)sizeof(struct iovec) * maxPkts; + msgHdr->msg_iov = (struct iovec *)malloc((size_t)size); + memset(msgHdr->msg_iov, 0, (size_t)size); +} + +void taosSetMsgHdrData(void *hdr, char *data, int dataLen) { + struct msghdr *msgHdr = (struct msghdr *)hdr; + msgHdr->msg_iov[msgHdr->msg_iovlen].iov_base = data; + msgHdr->msg_iov[msgHdr->msg_iovlen].iov_len = (size_t)dataLen; + msgHdr->msg_iovlen++; +} diff --git a/src/rpc/src/trpc.c b/src/rpc/src/trpc.c new file mode 100644 index 000000000000..4c16ada51c07 --- /dev/null +++ b/src/rpc/src/trpc.c @@ -0,0 +1,1382 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "shash.h" +#include "taosmsg.h" +#include "tidpool.h" +#include "tlog.h" +#include "tlog.h" +#include "tmd5.h" +#include "tmempool.h" +#include "trpc.h" +#include "tsdb.h" +#include "tsocket.h" +#include "ttcpclient.h" +#include "ttcpserver.h" +#include "ttime.h" +#include "ttimer.h" +#include "ttimer.h" +#include "tudp.h" +#include "tutil.h" + +typedef struct _msg_node { + struct _msg_node *next; + void * ahandle; + int msgLen; +} SMsgNode; + +typedef struct { + void * signature; + int chann; // channel ID + int sid; // session ID + uint32_t ownId; // own link ID + uint32_t peerId; // peer link ID + char meterId[TSDB_UNI_LEN]; + char spi; + char encrypt; + uint8_t secret[TSDB_KEY_LEN]; + uint8_t ckey[TSDB_KEY_LEN]; + + short localPort; // for UDP only + uint32_t peerUid; + uint32_t peerIp; // peer IP + short peerPort; // peer port + char peerIpstr[20]; // peer IP string + uint16_t tranId; // outgoing transcation ID, for build message + uint16_t outTranId; // outgoing transcation ID + uint16_t inTranId; + char outType; + char inType; + char closing; + char rspReceived; + void * chandle; // handle passed by TCP/UDP connection layer + void * ahandle; // handle returned by upper app layter + int retry; + int tretry; // total retry + void * pTimer; + void * pIdleTimer; + char * pRspMsg; + char * pQuickRsp; + int rspMsgLen; + SMsgNode * pMsgNode; + SMsgNode * pHead, *pTail; + struct rpc_server *pServer; +} SRpcConn; + +typedef struct { + int sessions; + SRpcConn * connList; + void * idPool; + void * tmrCtrl; + void * hash; + pthread_mutex_t mutex; +} SRpcChann; + +typedef struct rpc_server { + void *shandle; // returned handle from lower layer during initialization + void *qhandle; // for scheduler + int bits; // number of bits for session ID + int mask; + int numOfChanns; + int numOfThreads; + int idMgmt; // ID management method + int type; + int idleTime; // milliseconds; + int noFree; // do not free the request msg when rsp is received + int index; // for UDP server, next thread for new connection + short localPort; + char label[12]; + void *(*fp)(char *, void *ahandle, void *thandle); + void (*efp)(int); // FP to report error + int (*afp)(char *meterId, char *spi, char *encrypt, uint8_t *secret, uint8_t *ckey); // FP to retrieve auth info + SRpcChann *channList; +} STaosRpc; + +// configurable +int taosDebugFlag = 131; +int tsRpcTimer = 300; +int tsRpcMaxTime = 600; // seconds; + +// not configurable +int tsRpcMaxRetry; +int tsRpcHeadSize; + +void *(*taosInitConn[])(char *ip, short port, char *label, int threads, void *fp, void *shandle) = { + taosInitUdpServer, taosInitUdpClient, taosInitTcpServer, taosInitTcpClient}; + +void (*taosCleanUpConn[])(void *thandle) = {taosCleanUpUdpConnection, taosCleanUpUdpConnection, taosCleanUpTcpServer, + taosCleanUpTcpClient}; 
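/* The function-pointer arrays above and below form a small dispatch table: the
   connType passed to taosOpenRpc() picks one entry from each of taosInitConn,
   taosCleanUpConn, taosSendData, taosOpenConn and taosCloseConn, so the rest of
   the RPC layer stays transport-agnostic. NULL entries (for example the TCP-server
   slot in taosOpenConn, or the UDP slots in taosCloseConn) are checked before use
   and simply skipped, as in taosOpenRpcConn():

       if (taosOpenConn[pServer->type]) {
         pConn->chandle = (*taosOpenConn[pServer->type])(pServer->shandle, pConn,
                                                         pConn->peerIpstr, pConn->peerPort);
       }
*/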
+ +int (*taosSendData[])(uint32_t ip, short port, char *data, int len, void *chandle) = { + taosSendUdpData, taosSendUdpData, taosSendTcpServerData, taosSendTcpClientData}; + +void *(*taosOpenConn[])(void *shandle, void *thandle, char *ip, short port) = { + taosOpenUdpConnection, taosOpenUdpConnection, NULL, taosOpenTcpClientConnection, +}; + +void (*taosCloseConn[])(void *chandle) = {NULL, NULL, taosCloseTcpServerConnection, taosCloseTcpClientConnection}; + +int taosReSendRspToPeer(SRpcConn *pConn); +void taosProcessTaosTimer(void *, void *); +void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, short port, void *shandle, void *thandle, + void *chandle); +int taosSendDataToPeer(SRpcConn *pConn, char *data, int dataLen); +void taosProcessSchedMsg(SSchedMsg *pMsg); +int taosAuthenticateMsg(uint8_t *pMsg, int msgLen, uint8_t *pAuth, uint8_t *pKey); +int taosBuildAuthHeader(uint8_t *pMsg, int msgLen, uint8_t *pAuth, uint8_t *pKey); + +char *taosBuildReqHeader(void *param, char type, char *msg) { + STaosHeader *pHeader; + SRpcConn * pConn = (SRpcConn *)param; + + if (pConn == NULL || pConn->signature != pConn) { + tError("pConn:%p, connection has to be openned first before building a message", pConn); + return NULL; + } + + pHeader = (STaosHeader *)(msg + sizeof(SMsgNode)); + pHeader->version = 1; + pHeader->msgType = type; + pHeader->spi = 0; + pHeader->tcp = 0; + pHeader->encrypt = 0; + if (pConn->tranId == 0) __sync_fetch_and_add(&pConn->tranId, 1); + pHeader->tranId = __sync_fetch_and_add(&pConn->tranId, 1); + + pHeader->sourceId = pConn->ownId; + pHeader->destId = pConn->peerId; + pHeader->port = 0; + pHeader->uid = (uint32_t)pConn; + + memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); + + return (char *)pHeader->content; +} + +char *taosBuildReqMsgWithSize(void *param, char type, int size) { + STaosHeader *pHeader; + char * pMsg; + SRpcConn * pConn = (SRpcConn *)param; + + if (pConn == NULL || pConn->signature != pConn) { + tError("pConn:%p, connection has to be openned first before building a message", pConn); + return NULL; + } + + size += sizeof(SMsgNode) + sizeof(STaosHeader) + sizeof(STaosDigest); + pMsg = (char *)malloc((size_t)size); + memset(pMsg, 0, (size_t)size); + pHeader = (STaosHeader *)(pMsg + sizeof(SMsgNode)); + pHeader->version = 1; + pHeader->msgType = type; + pHeader->spi = 0; + pHeader->tcp = 0; + pHeader->encrypt = 0; + if (pConn->tranId == 0) __sync_fetch_and_add(&pConn->tranId, 1); + pHeader->tranId = __sync_fetch_and_add(&pConn->tranId, 1); + + pHeader->sourceId = pConn->ownId; + pHeader->destId = pConn->peerId; + pHeader->uid = (uint32_t)pConn; + memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); + + return (char *)pHeader->content; +} + +char *taosBuildRspMsgWithSize(void *param, char type, int size) { + STaosHeader *pHeader; + char * pMsg; + SRpcConn * pConn = (SRpcConn *)param; + + if (pConn == NULL || pConn->signature != pConn) { + tError("pConn:%p, connection has to be opened first before building a message", pConn); + return NULL; + } + + size += sizeof(SMsgNode) + sizeof(STaosHeader) + sizeof(STaosDigest); + pMsg = (char *)malloc((size_t)size); + memset(pMsg, 0, (size_t)size); + pHeader = (STaosHeader *)pMsg; + pHeader->version = 1; + pHeader->msgType = type; + pHeader->spi = 0; + pHeader->tcp = 0; + pHeader->encrypt = 0; + pHeader->tranId = pConn->inTranId; + pHeader->sourceId = pConn->ownId; + pHeader->destId = pConn->peerId; + pHeader->uid = 0; + memcpy(pHeader->meterId, pConn->meterId, 
tListLen(pHeader->meterId)); + + return (char *)pHeader->content; +} + +int taosSendSimpleRsp(void *thandle, char rsptype, char code) { + char *pMsg, *pStart; + int msgLen; + + if (thandle == NULL) { + tError("connection is gone, response could not be sent"); + return -1; + } + + pStart = taosBuildRspMsgWithSize(thandle, rsptype, 32); + pMsg = pStart; + + *pMsg = code; + pMsg++; + + msgLen = (int)(pMsg - pStart); + taosSendMsgToPeer(thandle, pStart, msgLen); + + return msgLen; +} + +int taosSendQuickRsp(void *thandle, char rsptype, char code) { + char * pCont; + int contLen; + STaosHeader *pHeader; + char * msg; + int msgLen; + SRpcConn * pConn = (SRpcConn *)thandle; + + pCont = taosBuildRspMsgWithSize(thandle, rsptype, 32); + if (pCont == NULL) return 0; + + *pCont = code; + contLen = 1; + + pHeader = (STaosHeader *)(pCont - sizeof(STaosHeader)); + msg = (char *)pHeader; + msgLen = contLen + (int32_t)sizeof(STaosHeader); + + if (pConn->spi) { + // add auth part + pHeader->spi = pConn->spi; + STaosDigest *pDigest = (STaosDigest *)(pCont + contLen); + pDigest->timeStamp = htonl(taosGetTimestampSec()); + msgLen += sizeof(STaosDigest); + pHeader->msgLen = (int32_t)htonl((uint32_t)msgLen); + taosBuildAuthHeader((uint8_t *)pHeader, msgLen - TSDB_AUTH_LEN, pDigest->auth, pConn->secret); + } else { + pHeader->msgLen = (int32_t)htonl((uint32_t)msgLen); + } + + tfree(pConn->pQuickRsp); + pConn->pQuickRsp = msg; + taosSendDataToPeer(pConn, (char *)pHeader, msgLen); + + return msgLen; +} + +void *taosOpenRpc(SRpcInit *pRpc) { + STaosRpc *pServer; + + tsRpcMaxRetry = tsRpcMaxTime * 1000 / tsRpcTimer; + tsRpcHeadSize = sizeof(STaosHeader) + sizeof(SMsgNode); + + pServer = (STaosRpc *)malloc(sizeof(STaosRpc)); + if (pServer == NULL) return NULL; + memset(pServer, 0, sizeof(STaosRpc)); + + pServer->bits = pRpc->bits; + pServer->mask = (1 << (pRpc->bits)) - 1; + pServer->numOfChanns = pRpc->numOfChanns; + strcpy(pServer->label, pRpc->label); + pServer->fp = pRpc->fp; + pServer->idMgmt = pRpc->idMgmt; + pServer->type = pRpc->connType; + pServer->idleTime = pRpc->idleTime; + pServer->noFree = pRpc->noFree; + pServer->numOfThreads = pRpc->numOfThreads; + if (pServer->numOfThreads > TSDB_MAX_RPC_THREADS) { + pServer->numOfThreads = TSDB_MAX_RPC_THREADS; + pRpc->numOfThreads = TSDB_MAX_RPC_THREADS; + } + pServer->localPort = pRpc->localPort; + pServer->qhandle = pRpc->qhandle; + pServer->efp = pRpc->efp; + pServer->afp = pRpc->afp; + + int size = (int)sizeof(SRpcChann) * pRpc->numOfChanns; + pServer->channList = (SRpcChann *)malloc((size_t)size); + memset(pServer->channList, 0, (size_t)size); + + pServer->shandle = (*taosInitConn[pRpc->connType])(pRpc->localIp, pRpc->localPort, pRpc->label, pRpc->numOfThreads, + taosProcessDataFromPeer, pServer); + if (pServer->shandle == NULL) { + tError("%s, failed to init network, %s:%d", pRpc->label, pRpc->localIp, pRpc->localPort); + taosCloseRpc(pServer); + return NULL; + } + + if (pServer->numOfChanns == 1) taosOpenRpcChann(pServer, 0, pRpc->sessionsPerChann); + + tTrace("%s RPC is openned, numOfThreads:%d", pRpc->label, pRpc->numOfThreads); + + return pServer; +} + +int taosOpenRpcChann(void *handle, int cid, int sessions) { + STaosRpc * pServer = (STaosRpc *)handle; + SRpcChann *pChann; + + if (pServer == NULL) return -1; + if (cid >= pServer->numOfChanns || cid < 0) { + tError("%s: cid:%d, chann is out of range, max:%d", pServer->label, cid, pServer->numOfChanns); + return -1; + } + + pChann = pServer->channList + cid; + memset(pChann, 0, sizeof(SRpcChann)); + + int 
size = (int)sizeof(SRpcConn) * sessions; + pChann->connList = (SRpcConn *)malloc((size_t)size); + if (pChann->connList == NULL) { + tError("%s cid:%d, failed to allocate memory for taos connections", pServer->label, cid); + return -1; + } + memset(pChann->connList, 0, (size_t)size); + + if (pServer->idMgmt == TAOS_ID_FREE) { + pChann->idPool = taosInitIdPool(sessions); + if (pChann->idPool == NULL) { + tError("%s cid:%d, failed to init ID pool", pServer->label, cid); + return -1; + } + } + + pChann->tmrCtrl = taosTmrInit(sessions * 2 + 1, 50, 10000, pServer->label); + if (pChann->tmrCtrl == NULL) { + tError("%s cid:%d, failed to init timers", pServer->label, cid); + return -1; + } + + pChann->hash = taosInitStrHash(sessions, sizeof(pChann), taosHashString); + if (pChann->hash == NULL) { + tError("%s cid:%d, failed to init string hash", pServer->label, cid); + return -1; + } + + pthread_mutex_init(&pChann->mutex, NULL); + pChann->sessions = sessions; + + return 0; +} + +void taosCloseRpcChann(void *handle, int cid) { + STaosRpc * pServer = (STaosRpc *)handle; + SRpcChann *pChann; + + if (pServer == NULL) return; + if (cid >= pServer->numOfChanns || cid < 0) { + tError("%s cid:%d, chann is out of range, max:%d", pServer->label, cid, pServer->numOfChanns); + return; + } + + pChann = pServer->channList + cid; + + for (int i = 0; i < pChann->sessions; ++i) { + if (pChann->connList[i].signature != NULL) { + taosCloseRpcConn((void *)(pChann->connList + i)); + } + } + + taosCleanUpStrHash(pChann->hash); + taosTmrCleanUp(pChann->tmrCtrl); + taosIdPoolCleanUp(pChann->idPool); + tfree(pChann->connList); + pthread_mutex_destroy(&pChann->mutex); + + memset(pChann, 0, sizeof(SRpcChann)); +} + +void taosCloseRpcConn(void *thandle) { + SRpcConn *pConn = (SRpcConn *)thandle; + if (pConn == NULL) return; + + STaosRpc *pServer = pConn->pServer; + if (pConn->signature != thandle || pServer == NULL) return; + if (pConn->closing) return; + SRpcChann *pChann = pServer->channList + pConn->chann; + + pthread_mutex_lock(&pChann->mutex); + + pConn->closing = 1; + pConn->signature = NULL; + + if (taosCloseConn[pServer->type]) (*taosCloseConn[pServer->type])(pConn->chandle); + + taosTmrStopA(&pConn->pTimer); + taosTmrStopA(&pConn->pIdleTimer); + tfree(pConn->pRspMsg); + + if (pServer->noFree == 0) free(pConn->pMsgNode); + pConn->pMsgNode = NULL; + + tfree(pConn->pQuickRsp); + + SMsgNode *pMsgNode; + while (pConn->pHead) { + pMsgNode = pConn->pHead; + pConn->pHead = pConn->pHead->next; + memset(pMsgNode, 0, sizeof(SMsgNode)); + if (pServer->noFree == 0) free(pMsgNode); + } + + char hashstr[40] = {0}; + sprintf(hashstr, "%x:%x:%x", pConn->peerIp, pConn->peerUid, pConn->peerId); + taosDeleteStrHash(pChann->hash, hashstr); + + tTrace("%s cid:%d sid:%d id:%s, TAOS connection closed, pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, pConn); + int freeId = pConn->sid; + memset(pConn, 0, sizeof(SRpcConn)); + + if (pChann->idPool) taosFreeId(pChann->idPool, freeId); + + pthread_mutex_unlock(&pChann->mutex); +} + +int taosGetRpcConn(int chann, int sid, char *meterId, STaosRpc *pServer, SRpcConn **ppConn, char req, char *hashstr) { + SRpcConn * pConn = NULL; + SRpcChann *pChann; + + if (pServer == NULL) return -1; + pChann = pServer->channList + chann; + + if (pServer->idMgmt == TAOS_ID_FREE) { + if ((sid == 0) || (pChann->connList[sid].signature == NULL)) { + if (req) { + int osid = sid; + SRpcConn **ppConn = (SRpcConn **)taosGetStrHashData(pChann->hash, hashstr); + if (ppConn) pConn = *ppConn; + if (pConn 
== NULL) { + sid = taosAllocateId(pChann->idPool); + if (sid <= 0) { + tError("%s cid:%d, maximum number of sessions:%d is reached", pServer->label, chann, pChann->sessions); + return TSDB_CODE_MAX_SESSIONS; + } else { + tTrace("%s cid:%d sid:%d, ID allocated, used:%d, old id:%d", pServer->label, chann, sid, + taosIdPoolNumOfUsed(pChann->idPool), osid); + } + } else { + sid = pConn->sid; + tTrace("%s cid:%d sid:%d id:%s, session is already there", pServer->label, pConn->chann, pConn->sid, + pConn->meterId); + } + } else { + return TSDB_CODE_UNEXPECTED_RESPONSE; + } + } + } + + pConn = pChann->connList + sid; + + if (pConn->signature == NULL) { + memset(pConn, 0, sizeof(SRpcConn)); + pConn->signature = pConn; + memcpy(pConn->meterId, meterId, tListLen(pConn->meterId)); + pConn->pServer = pServer; + pConn->chann = chann; + pConn->sid = sid; + pConn->tranId = (uint16_t)(rand() & 0xFFFF); + pConn->ownId = htonl((uint32_t)((pConn->chann << pServer->bits) + pConn->sid)); + if (pServer->afp) { + int ret = (*pServer->afp)(meterId, &pConn->spi, &pConn->encrypt, pConn->secret, pConn->ckey); + if (ret != 0) { + tTrace("%s cid:%d sid:%d id:%s, meterId not there pConn:%p", pServer->label, chann, sid, pConn->meterId, + pConn->localPort, pConn); + return ret; + } + } + + if ((pServer->type == TAOS_CONN_UDPC || pServer->type == TAOS_CONN_UDPS) && pServer->numOfThreads > 1 && + pServer->localPort) { + // UDP server, assign to new connection + pServer->index = (pServer->index + 1) % pServer->numOfThreads; + pConn->localPort = (int16_t)(pServer->localPort + pServer->index); + } + + taosAddStrHash(pChann->hash, hashstr, (char *)&pConn); + tTrace("%s cid:%d sid:%d id:%s, TAOS connection is allocated, localPort:%d pConn:%p", + pServer->label, chann, sid, pConn->meterId, pConn->localPort, pConn); + } else { + if (memcmp(pConn->meterId, meterId, tListLen(pConn->meterId)) != 0) { + tTrace("%s cid:%d sid:%d id:%s, meterId is not matched, received:%s", pServer->label, chann, sid, pConn->meterId, + meterId); + return TSDB_CODE_MISMATCHED_METER_ID; + } + } + + *ppConn = pConn; + + return TSDB_CODE_SUCCESS; +} + +void *taosOpenRpcConn(SRpcConnInit *pInit, uint8_t *code) { + SRpcConn *pConn; + STaosRpc *pServer = (STaosRpc *)pInit->shandle; + + *code = (uint8_t)(taosGetRpcConn(pInit->cid, pInit->sid, pInit->meterId, pServer, &pConn, 1, NULL)); + if (*code == TSDB_CODE_MAX_SESSIONS) *code = TSDB_CODE_MAX_CONNECTIONS; + if (*code != TSDB_CODE_SUCCESS) return NULL; + + if (pConn->peerId == 0) pConn->peerId = pInit->peerId; + + strcpy(pConn->peerIpstr, pInit->peerIp); + pConn->peerIp = inet_addr(pInit->peerIp); + pConn->peerPort = pInit->peerPort; + pConn->ahandle = pInit->ahandle; + pConn->spi = pInit->spi; + pConn->encrypt = pInit->encrypt; + if (pConn->spi) memcpy(pConn->secret, pInit->secret, TSDB_KEY_LEN); + + // if it is client, it shall set up connection first + if (taosOpenConn[pServer->type]) { + pConn->chandle = (*taosOpenConn[pServer->type])(pServer->shandle, pConn, pConn->peerIpstr, pConn->peerPort); + if (pConn->chandle) { + tTrace("%s cid:%d sid:%d id:%s, nw connection is set up, ip:%s:%hu localPort:%d pConn:%p", + pServer->label, pConn->chann, pConn->sid, pInit->meterId, pConn->peerIpstr, pConn->peerPort, + pConn->localPort, pConn); + } else { + tError("%s cid:%d sid:%d id:%s, failed to set up nw connection to ip:%s:%hu", pServer->label, pConn->chann, + pConn->sid, pInit->meterId, pConn->peerIpstr, pConn->peerPort); + *code = TSDB_CODE_NETWORK_UNAVAIL; + taosCloseRpcConn(pConn); + pConn = NULL; + } + } + + 
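  /* At this point pConn is either a fully initialized connection, handed back to
     the caller as an opaque handle (for taosBuildReqMsgWithSize, taosSendMsgToPeer
     and friends), or NULL if the transport-level connect above failed, in which
     case *code was set to TSDB_CODE_NETWORK_UNAVAIL and the half-built connection
     has already been released via taosCloseRpcConn. */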
return pConn; +} + +void taosCloseRpc(void *param) { + STaosRpc *pServer = (STaosRpc *)param; + + (*taosCleanUpConn[pServer->type])(pServer->shandle); + + for (int cid = 0; cid < pServer->numOfChanns; ++cid) taosCloseRpcChann(pServer, cid); + + tfree(pServer->channList); + tfree(pServer); +} + +int taosSetSecurityInfo(int chann, int sid, char *id, int spi, int encrypt, char *secret, char *ckey) { + /* + SRpcConn *pConn; + + pConn = connList[chann*tsSessionsPerChann + sid]; + + if ( pConn == NULL ) { + pConn = (SRpcConn *)sizeof(SRpcConn); + + if ( pConn == NULL ) { + tError("failed to allocate memory for taosConn"); + return -1; + } + + memset(pConn, 0, sizeof(SRpcConn)); + pConn->chann = chann; + pConn->sid = sid; + } + + pConn->spi = spi; + pConn->encrypt = encrypt; + memcpy(pConn->secret, pConn->secret, TSDB_KEY_LEN); + memcpy(pConn->cipheringKey, ckey, TSDB_KEY_LEN); + memcpy(pConn->meterId, id, TSDB_METER_ID_LEN); + */ + return -1; +} + +int taosSendDataToPeer(SRpcConn *pConn, char *data, int dataLen) { + int writtenLen = 0; + STaosRpc * pServer = pConn->pServer; + STaosHeader *pHeader = (STaosHeader *)data; + + if (pConn->signature != pConn || pServer == NULL) return -1; + + if (pHeader->msgType & 1) { + if (pHeader->msgType < TSDB_MSG_TYPE_HEARTBEAT || (taosDebugFlag & 16)) + tTrace( + "%s cid:%d sid:%d id:%s, %s is sent to %s:%hu, len:%d tranId:%d " + "pConn:%p", + pServer->label, pConn->chann, pConn->sid, pConn->meterId, taosMsg[pHeader->msgType], pConn->peerIpstr, + pConn->peerPort, dataLen, pHeader->tranId, pConn); + } else { + if (pHeader->msgType < TSDB_MSG_TYPE_HEARTBEAT || (taosDebugFlag & 16)) + tTrace( + "%s cid:%d sid:%d id:%s, %s is sent to %s:%hu, code:%u len:%d " + "tranId:%d pConn:%p", + pServer->label, pConn->chann, pConn->sid, pConn->meterId, taosMsg[pHeader->msgType], pConn->peerIpstr, + pConn->peerPort, (uint8_t)pHeader->content[0], dataLen, pHeader->tranId, pConn); + } + + writtenLen = (*taosSendData[pServer->type])(pConn->peerIp, pConn->peerPort, (char *)pHeader, dataLen, pConn->chandle); + + if (writtenLen != dataLen) + tError("%s cid:%d sid:%d id:%s, dataLen:%d writtenLen:%d, not good, reason:%s", pServer->label, pConn->chann, + pConn->sid, pConn->meterId, dataLen, writtenLen, strerror(errno)); + // assert ( writtenLen == dataLen ); + tDump(data, dataLen); + tTrace("%s msg sent, len:%d source:0x%08x dest:0x%08x tranId:%d pConn:%p", pServer->label, dataLen, pHeader->sourceId, + pHeader->destId, pHeader->tranId, pConn); + + return 0; +} + +void taosProcessResponse(SRpcConn *pConn) { + STaosHeader *pHeader; + char * msg = NULL; + int msgLen = 0; + + if (pConn == NULL) return; + STaosRpc *pServer = pConn->pServer; + if (pConn->signature != pConn || pServer == NULL) return; + SRpcChann *pChann = pServer->channList + pConn->chann; + + pthread_mutex_lock(&pChann->mutex); + + pConn->outType = 0; + pConn->rspReceived = 0; + if (pServer->noFree == 0) tfree(pConn->pMsgNode); + pConn->pMsgNode = NULL; + + if (pConn->pHead) { + SMsgNode *pMsgNode = pConn->pHead; + // assert ( pMsgNode->msgLen >= sizeof(STaosHeader) && pMsgNode->msgLen < RPC_MAX_UDP_SIZE); + if (pMsgNode->msgLen >= sizeof(STaosHeader)) { + pConn->pMsgNode = pMsgNode; + pConn->pHead = pMsgNode->next; + if (pMsgNode->ahandle) pConn->ahandle = pMsgNode->ahandle; + + pHeader = (STaosHeader *)((char *)pMsgNode + sizeof(SMsgNode)); + pConn->outType = pHeader->msgType; + pConn->outTranId = pHeader->tranId; + + msg = (char *)pHeader; + msgLen = pMsgNode->msgLen; + + } else { + tError("%s cid:%d sid:%d id:%s, 
invalid msgLen:%d pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, pMsgNode->msgLen, pConn); + pConn->pHead = NULL; + } + + if (pConn->pHead == NULL) pConn->pTail = NULL; + } + + pthread_mutex_unlock(&pChann->mutex); + + if (msg) { + taosSendDataToPeer(pConn, msg, msgLen); + taosTmrReset(taosProcessTaosTimer, tsRpcTimer, pConn, pChann->tmrCtrl, &pConn->pTimer); + } +} + +int taosProcessMsgHeader(STaosHeader *pHeader, SRpcConn **ppConn, STaosRpc *pServer, int dataLen, uint32_t ip, + short port, void *chandle) { + int chann, sid, code = 0; + SRpcConn * pConn = NULL; + SRpcChann *pChann; + int msgLen; + char hashstr[40] = {0}; + int reSend = 0; + + *ppConn = NULL; + uint32_t destId = htonl(pHeader->destId); + chann = destId >> pServer->bits; + sid = destId & pServer->mask; + + if (pHeader->msgType >= TSDB_MSG_TYPE_MAX || pHeader->msgType <= 0) { + tTrace("%s cid:%d sid:%d, invalid message type:%d", pServer->label, chann, sid, pHeader->msgType); + return TSDB_CODE_INVALID_MSG_TYPE; + } + + msgLen = (int32_t)htonl((uint32_t)pHeader->msgLen); + if (dataLen != msgLen) { + tTrace("%s cid:%d sid:%d, %s has invalid length, dataLen:%d, msgLen:%d", pServer->label, chann, sid, + taosMsg[pHeader->msgType], dataLen, msgLen); + return TSDB_CODE_INVALID_MSG_LEN; + } + + if (chann < 0 || chann >= pServer->numOfChanns) { + tTrace("%s cid:%d sid:%d, chann is out of range, max:%d, %s discarded", pServer->label, chann, sid, + pServer->numOfChanns, taosMsg[pHeader->msgType]); + return TSDB_CODE_INVALID_SESSION_ID; + } + + pChann = pServer->channList + chann; + if (pChann->sessions == 0) { + tTrace("%s cid:%d, chann is not activated yet, %s discarded", pServer->label, chann, taosMsg[pHeader->msgType]); + if (pServer->efp) (*(pServer->efp))(chann); + return TSDB_CODE_NOT_ACTIVE_SESSION; + } + + if (sid < 0 || sid >= pChann->sessions) { + tTrace("%s cid:%d sid:%d, sid is out of range, max sid:%d, %s discarded", pServer->label, chann, sid, + pChann->sessions, taosMsg[pHeader->msgType]); + return TSDB_CODE_INVALID_SESSION_ID; + } + + // if ( pHeader->tcp ) return TSDB_CODE_ALREADY_PROCESSED; + if (sid == 0) sprintf(hashstr, "%x:%x:%x", ip, pHeader->uid, pHeader->sourceId); + + pthread_mutex_lock(&pChann->mutex); + + code = taosGetRpcConn(chann, sid, pHeader->meterId, pServer, &pConn, pHeader->msgType & 1, hashstr); + if (code != TSDB_CODE_SUCCESS) goto _exit; + + *ppConn = pConn; + sid = pConn->sid; + + if (pConn->peerIp != ip) { + pConn->peerIp = ip; + char ipstr[20] = {0}; + tinet_ntoa(ipstr, ip); + strcpy(pConn->peerIpstr, ipstr); + } + + if (pHeader->uid) pConn->peerUid = pHeader->uid; + + if (port) pConn->peerPort = port; + + if (pHeader->port) // port maybe changed by the peer + pConn->peerPort = pHeader->port; + + if (chandle) pConn->chandle = chandle; + + if (pHeader->tcp) { + tTrace("%s cid:%d sid:%d id:%s, content will be transfered via TCP pConn:%p", pServer->label, chann, sid, + pConn->meterId, pConn); + if (pConn->outType) taosTmrReset(taosProcessTaosTimer, tsRpcTimer, pConn, pChann->tmrCtrl, &pConn->pTimer); + code = TSDB_CODE_ALREADY_PROCESSED; + goto _exit; + } + + if (pConn->spi != 0) { + if (pHeader->spi == pConn->spi) { + // authentication + STaosDigest *pDigest = (STaosDigest *)((char *)pHeader + dataLen - sizeof(STaosDigest)); + + int32_t delta; + delta = (int32_t)htonl(pDigest->timeStamp); + delta -= (int32_t)taosGetTimestampSec(); + if (abs(delta) > 900) { + tWarn("%s cid:%d sid:%d id:%s, time diff:%d is too big, msg discarded pConn:%p, timestamp:%d", + pServer->label, 
chann, sid, pConn->meterId, delta, pConn, htonl(pDigest->timeStamp)); + // the requirement of goldwind, should not return error in this case + code = TSDB_CODE_INVALID_TIME_STAMP; + goto _exit; + } + + if (taosAuthenticateMsg((uint8_t *)pHeader, dataLen - TSDB_AUTH_LEN, pDigest->auth, pConn->secret) < 0) { + tTrace("%s cid:%d sid:%d id:%s, authentication failed, msg discarded pConn:%p", + pServer->label, chann, sid, pConn->meterId, pConn); + code = TSDB_CODE_AUTH_FAILURE; + goto _exit; + } + } else { + // if it is request or response with code 0, msg shall be discarded + if ((pHeader->msgType & 1) || (pHeader->content[0] == 0)) { + tTrace("%s cid:%d sid:%d id:%s, auth spi not matched, msg discarded pConn:%p", + pServer->label, chann, sid, pConn->meterId, pConn); + code = TSDB_CODE_AUTH_FAILURE; + goto _exit; + } + } + } + + if (pHeader->msgType != TSDB_MSG_TYPE_REG && pHeader->encrypt) { + // decrypt here + } + + pHeader->destId = pConn->ownId; // destId maybe 0, it shall be changed + + if (pHeader->msgType & 1) { + if (pConn->peerId == 0) { + pConn->peerId = pHeader->sourceId; + } else { + if (pConn->peerId != pHeader->sourceId) { + tTrace("%s cid:%d sid:%d id:%s, source Id is changed, old:0x%08x new:0x%08x pConn:%p", + pServer->label, chann, sid, pConn->meterId, pConn->peerId, pHeader->sourceId, pConn); + code = TSDB_CODE_SESSION_ALREADY_EXIST; + goto _exit; + } + } + + if (pConn->inTranId == pHeader->tranId) { + if (pConn->inType == pHeader->msgType) { + tTrace("%s cid:%d sid:%d id:%s, %s is retransmitted, pConn:%p", pServer->label, chann, sid, pConn->meterId, + taosMsg[pHeader->msgType], pConn); + taosSendQuickRsp(pConn, (char)(pHeader->msgType + 1), TSDB_CODE_ACTION_IN_PROGRESS); + } else if (pConn->inType == 0) { + tTrace("%s cid:%d sid:%d id:%s, %s is already processed, tranId:%d pConn:%p", + pServer->label, chann, sid, pConn->meterId, taosMsg[pHeader->msgType], pConn->inTranId, pConn); + reSend = 1; + } else { + tTrace("%s cid:%d sid:%d id:%s, mismatched message %s and tranId pConn:%p", pServer->label, chann, sid, + pConn->meterId, taosMsg[pHeader->msgType], pConn); + } + + // do not reply any message + code = TSDB_CODE_ALREADY_PROCESSED; + goto _exit; + } + + if (pConn->inType != 0) { + tTrace("%s cid:%d sid:%d id:%s, last session is not finished, inTranId:%d tranId:%d pConn:%p", + pServer->label, chann, sid, pConn->meterId, pConn->inTranId, pHeader->tranId, pConn); + code = TSDB_CODE_LAST_SESSION_NOT_FINISHED; + goto _exit; + } + + pConn->inTranId = pHeader->tranId; + pConn->inType = pHeader->msgType; + + if (sid == 0) // send a response first + taosSendQuickRsp(pConn, (char)(pConn->inType + 1), TSDB_CODE_ACTION_IN_PROGRESS); + + } else { + // response from taos + pConn->peerId = pHeader->sourceId; + + if (pConn->outType == 0) { + code = TSDB_CODE_UNEXPECTED_RESPONSE; + goto _exit; + } + + if (pHeader->tranId != pConn->outTranId) { + code = TSDB_CODE_INVALID_TRAN_ID; + goto _exit; + } + + if (pHeader->msgType != pConn->outType + 1) { + code = TSDB_CODE_INVALID_RESPONSE_TYPE; + goto _exit; + } + + if (*pHeader->content == TSDB_CODE_NOT_READY) { + code = TSDB_CODE_ALREADY_PROCESSED; + goto _exit; + } + + taosTmrStopA(&pConn->pTimer); + pConn->retry = 0; + + if (*pHeader->content == TSDB_CODE_ACTION_IN_PROGRESS || pHeader->tcp) { + if (pConn->tretry <= tsRpcMaxRetry) { + tTrace("%s cid:%d sid:%d id:%s, peer is still processing the transaction, pConn:%p", + pServer->label, chann, sid, pHeader->meterId, pConn); + pConn->tretry++; + taosTmrReset(taosProcessTaosTimer, tsRpcTimer, pConn, 
pChann->tmrCtrl, &pConn->pTimer); + code = TSDB_CODE_ALREADY_PROCESSED; + goto _exit; + } else { + // peer still in processing, give up + *pHeader->content = TSDB_CODE_TOO_SLOW; + } + } + + pConn->tretry = 0; + if (pConn->rspReceived) { + code = TSDB_CODE_UNEXPECTED_RESPONSE; + goto _exit; + } else { + pConn->rspReceived = 1; + } + } + +_exit: + pthread_mutex_unlock(&pChann->mutex); + + if (reSend) taosReSendRspToPeer(pConn); + + return code; +} + +int taosBuildErrorMsgToPeer(char *pMsg, int code, char *pReply) { + STaosHeader *pRecvHeader, *pReplyHeader; + char * pContent; + uint32_t timeStamp; + int msgLen; + + pRecvHeader = (STaosHeader *)pMsg; + pReplyHeader = (STaosHeader *)pReply; + + pReplyHeader->version = pRecvHeader->version; + pReplyHeader->msgType = (char)(pRecvHeader->msgType + 1); + pReplyHeader->tcp = 0; + pReplyHeader->spi = 0; + pReplyHeader->encrypt = 0; + pReplyHeader->tranId = pRecvHeader->tranId; + pReplyHeader->sourceId = 0; + pReplyHeader->destId = pRecvHeader->sourceId; + memcpy(pReplyHeader->meterId, pRecvHeader->meterId, tListLen(pReplyHeader->meterId)); + + pContent = (char *)pReplyHeader->content; + *pContent = (char)code; + pContent++; + + if (code == TSDB_CODE_INVALID_TIME_STAMP) { + // include a time stamp if client's time is not synchronized well + timeStamp = taosGetTimestampSec(); + memcpy(pContent, &timeStamp, sizeof(timeStamp)); + pContent += sizeof(timeStamp); + } + + msgLen = (int)(pContent - pReply); + pReplyHeader->msgLen = (int32_t)htonl((uint32_t)msgLen); + + return msgLen; +} + +void taosProcessIdleTimer(void *param, void *tmrId) { + SRpcConn *pConn = (SRpcConn *)param; + if (pConn->signature != param) { + tError("idle timer pConn Signature:0x%x, pConn:0x%x not matched", pConn->signature, param); + return; + } + + STaosRpc * pServer = pConn->pServer; + SRpcChann *pChann = pServer->channList + pConn->chann; + if (pConn->pIdleTimer != tmrId) { + tTrace("%s cid:%d sid:%d id:%s, idle timer:%p already processed pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, tmrId, pConn); + return; + } + + pthread_mutex_lock(&pChann->mutex); + + tTrace("%s cid:%d sid:%d id:%s, close the connection since no activity pConn:%p", pServer->label, pConn->chann, + pConn->sid, pConn->meterId, pConn); + if (pConn->rspReceived == 0) { + pConn->rspReceived = 1; + + SSchedMsg schedMsg; + schedMsg.fp = taosProcessSchedMsg; + schedMsg.msg = NULL; + schedMsg.ahandle = pConn->ahandle; + schedMsg.thandle = pConn; + taosScheduleTask(pServer->qhandle, &schedMsg); + } + + pthread_mutex_unlock(&pChann->mutex); +} + +void *taosProcessDataFromPeer(char *data, int dataLen, uint32_t ip, short port, void *shandle, void *thandle, + void *chandle) { + STaosHeader *pHeader; + uint8_t code; + SRpcConn * pConn = (SRpcConn *)thandle; + STaosRpc * pServer = (STaosRpc *)shandle; + int msgLen; + char pReply[128]; + SSchedMsg schedMsg; + int chann, sid; + + tDump(data, dataLen); + + if (ip == 0 && taosCloseConn[pServer->type]) { + // it means the connection is broken + if (pConn) { + tTrace("%s cid:%d sid:%d id:%s, underlying link is gone pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, pConn); + pConn->rspReceived = 1; + pConn->chandle = NULL; + schedMsg.fp = taosProcessSchedMsg; + schedMsg.msg = NULL; + schedMsg.ahandle = pConn->ahandle; + schedMsg.thandle = pConn; + taosScheduleTask(pServer->qhandle, &schedMsg); + } + tfree(data); + return NULL; + } + + pHeader = (STaosHeader *)data; + tTrace("%s msg received, len:%d source:0x%08x dest:0x%08x tranId:%d", 
pServer->label, dataLen, pHeader->sourceId, + pHeader->destId, pHeader->tranId); + msgLen = (int32_t)htonl((uint32_t)pHeader->msgLen); + + code = (uint8_t)taosProcessMsgHeader(pHeader, &pConn, pServer, dataLen, ip, port, chandle); + + pHeader->destId = htonl(pHeader->destId); + chann = pHeader->destId >> pServer->bits; + sid = pHeader->destId & pServer->mask; + + if (pConn && pServer->idleTime) { + SRpcChann *pChann = pServer->channList + pConn->chann; + taosTmrReset(taosProcessIdleTimer, pServer->idleTime, pConn, pChann->tmrCtrl, &pConn->pIdleTimer); + } + + if (code == TSDB_CODE_ALREADY_PROCESSED) { + tTrace("%s cid:%d sid:%d id:%s, %s wont be processed tranId:%d pConn:%p", pServer->label, chann, sid, + pHeader->meterId, taosMsg[pHeader->msgType], pHeader->tranId, pConn); + free(data); + return pConn; + } + + if (pHeader->msgType < TSDB_MSG_TYPE_HEARTBEAT || (taosDebugFlag & 16)) { + tTrace("%s cid:%d sid:%d id:%s, %s received from 0x%x:%hu, parse code:%u, first:%u len:%d tranId:%d pConn:%p", + pServer->label, chann, sid, pHeader->meterId, taosMsg[pHeader->msgType], ip, port, code, pHeader->content[0], + dataLen, pHeader->tranId, pConn); + } + + if (code != 0) { + // parsing error + + if (pHeader->msgType & 1) { + msgLen = taosBuildErrorMsgToPeer(data, code, pReply); + (*taosSendData[pServer->type])(ip, port, pReply, msgLen, chandle); + tTrace("%s cid:%d sid:%d id:%s, %s is sent with error code:%u pConn:%p", pServer->label, chann, sid, + pHeader->meterId, taosMsg[pHeader->msgType + 1], code, pConn); + } else { + tTrace("%s cid:%d sid:%d id:%s, %s is received, parsing error:%u pConn:%p", pServer->label, chann, sid, + pHeader->meterId, taosMsg[pHeader->msgType], code, pConn); + } + + free(data); + } else { + // parsing OK + + // internal communication is based on TAOS protocol, a trick here to make it efficient + pHeader->msgLen = msgLen - (int)sizeof(STaosHeader) + (int)sizeof(SIntMsg); + + if ((pHeader->msgType & 1) == 0 && (pHeader->content[0] == TSDB_CODE_SESSION_ALREADY_EXIST)) { + schedMsg.msg = NULL; // connection shall be closed + } else { + schedMsg.msg = (char *)(&(pHeader->destId)); + // memcpy(schedMsg.msg, (char *)(&(pHeader->destId)), pHeader->msgLen); + } + + if (pHeader->msgType < TSDB_MSG_TYPE_HEARTBEAT || (taosDebugFlag & 16)) { + tTrace("%s cid:%d sid:%d id:%s, %s is put into queue, msgLen:%d pConn:%p pTimer:%p", + pServer->label, chann, sid, pHeader->meterId, taosMsg[pHeader->msgType], pHeader->msgLen, pConn, + pConn->pTimer); + } + + schedMsg.fp = taosProcessSchedMsg; + schedMsg.ahandle = pConn->ahandle; + schedMsg.thandle = pConn; + taosScheduleTask(pServer->qhandle, &schedMsg); + } + + return pConn; +} + +int taosSendMsgToPeerH(void *thandle, char *pCont, int contLen, void *ahandle) { + STaosHeader *pHeader; + SMsgNode * pMsgNode; + char * msg; + int msgLen = 0; + SRpcConn * pConn = (SRpcConn *)thandle; + STaosRpc * pServer; + SRpcChann * pChann; + + if (pConn == NULL) return -1; + if (pConn->signature != pConn) return -1; + + pServer = pConn->pServer; + pChann = pServer->channList + pConn->chann; + pHeader = (STaosHeader *)(pCont - sizeof(STaosHeader)); + msg = (char *)pHeader; + msgLen = contLen + (int32_t)sizeof(STaosHeader); + + if ((pHeader->msgType & 1) == 0 && pConn->localPort) pHeader->port = pConn->localPort; + + if (pConn->spi) { + // add auth part + pHeader->spi = pConn->spi; + STaosDigest *pDigest = (STaosDigest *)(pCont + contLen); + pDigest->timeStamp = htonl(taosGetTimestampSec()); + msgLen += sizeof(STaosDigest); + pHeader->msgLen = 
(int32_t)htonl((uint32_t)msgLen); + taosBuildAuthHeader((uint8_t *)pHeader, msgLen - TSDB_AUTH_LEN, pDigest->auth, pConn->secret); + } else { + pHeader->msgLen = (int32_t)htonl((uint32_t)msgLen); + } + + pthread_mutex_lock(&pChann->mutex); + + if ((pHeader->msgType & 1) == 0) { + // response + pConn->inType = 0; + tfree(pConn->pRspMsg); + pConn->pRspMsg = msg; + pConn->rspMsgLen = msgLen; + + if (pHeader->content[0] == TSDB_CODE_ACTION_IN_PROGRESS) pConn->inTranId--; + + } else { + // request + pMsgNode = (SMsgNode *)(pCont - sizeof(STaosHeader) - sizeof(SMsgNode)); + pMsgNode->msgLen = msgLen; + pMsgNode->next = NULL; + pMsgNode->ahandle = ahandle; + + if (pConn->outType) { + if (pConn->pTail) { + pConn->pTail->next = pMsgNode; + pConn->pTail = pMsgNode; + } else { + pConn->pTail = pMsgNode; + pConn->pHead = pMsgNode; + } + + tTrace("%s cid:%d sid:%d id:%s, msg:%s is put into queue pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, taosMsg[pHeader->msgType], pConn); + msgLen = 0; + + } else { + assert(pConn->pMsgNode == NULL); + if (pConn->pMsgNode) { + tError("%s cid:%d sid:%d id:%s, bug, there shall be no pengding req pConn:%p", + pServer->label, pConn->chann, pConn->sid, pConn->meterId, pConn); + } + + pConn->outType = pHeader->msgType; + pConn->outTranId = pHeader->tranId; + pConn->pMsgNode = pMsgNode; + pConn->rspReceived = 0; + if (pMsgNode->ahandle) pConn->ahandle = pMsgNode->ahandle; + } + } + + pthread_mutex_unlock(&pChann->mutex); + + if (msgLen) { + taosSendDataToPeer(pConn, (char *)pHeader, msgLen); + if (pHeader->msgType & 1) { + taosTmrReset(taosProcessTaosTimer, tsRpcTimer, pConn, pChann->tmrCtrl, &pConn->pTimer); + } + } + + return contLen; +} + +int taosReSendRspToPeer(SRpcConn *pConn) { + STaosHeader *pHeader; + int writtenLen; + STaosRpc * pServer = pConn->pServer; + + if (pConn->pRspMsg == NULL || pConn->rspMsgLen <= 0) { + tError("%s cid:%d sid:%d id:%s, rsp is null", pServer->label, pConn->chann, pConn->sid, pConn->meterId); + return -1; + } + + pHeader = (STaosHeader *)pConn->pRspMsg; + if (pHeader->msgLen <= sizeof(SIntMsg) + 1 || pHeader->msgType <= 0) { + tError("%s cid:%d sid:%d id:%s, rsp is null, rspLen:%d, msgType:%d", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, pHeader->msgLen, pHeader->msgType); + return -1; + } + + writtenLen = + (*taosSendData[pServer->type])(pConn->peerIp, pConn->peerPort, pConn->pRspMsg, pConn->rspMsgLen, pConn->chandle); + + if (writtenLen != pConn->rspMsgLen) { + tError("%s cid:%d sid:%d id:%s, failed to re-send %s, reason:%s pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, taosMsg[(int)pHeader->msgType], strerror(errno), pConn); + } else { + tTrace("%s cid:%d sid:%d id:%s, msg:%s is re-sent to %s:%hu, len:%d pConn:%p", pServer->label, pConn->chann, + pConn->sid, pConn->meterId, taosMsg[(int)pHeader->msgType], pConn->peerIpstr, pConn->peerPort, + pConn->rspMsgLen, pConn); + } + + return 0; +} + +void taosProcessTaosTimer(void *param, void *tmrId) { + STaosHeader *pHeader = NULL; + SRpcConn * pConn = (SRpcConn *)param; + + if (pConn->signature != param) { + tError("pConn Signature:0x%x, pConn:0x%x not matched", pConn->signature, param); + return; + } + + STaosRpc * pServer = pConn->pServer; + SRpcChann *pChann = pServer->channList + pConn->chann; + + if (pConn->pTimer != tmrId) { + tTrace("%s cid:%d sid:%d id:%s, timer:%p already processed pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, tmrId, pConn); + return; + } + + pthread_mutex_lock(&pChann->mutex); + 
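+  // Retry state machine: if no response has arrived for the pending request,
+  // re-send it up to 3 times (re-signing the auth digest when spi is set);
+  // after that, flag the response as received and schedule a task so the
+  // upper layer can close the connection.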
+ if (pConn->rspReceived) { + tTrace("%s cid:%d sid:%d id:%s, rsp just received, pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, pConn); + } else if (pConn->outType == 0) { + tTrace("%s cid:%d sid:%d id:%s, outtype is zero, pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, pConn); + } else { + tTrace("%s cid:%d sid:%d id:%s, expected %s is not received, pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, taosMsg[(int)pConn->outType + 1], pConn); + pConn->pTimer = NULL; + pConn->retry++; + + if (pConn->retry < 3) { + tTrace("%s cid:%d sid:%d id:%s, re-send msg:%s to %s:%hu pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, taosMsg[pConn->outType], pConn->peerIpstr, pConn->peerPort, pConn); + if (pConn->pMsgNode && pConn->pMsgNode->msgLen > 0) { + pHeader = (STaosHeader *)((char *)pConn->pMsgNode + sizeof(SMsgNode)); + pHeader->destId = pConn->peerId; + if (pConn->spi) { + STaosDigest *pDigest = (STaosDigest *)(((char *)pHeader) + pConn->pMsgNode->msgLen - sizeof(STaosDigest)); + pDigest->timeStamp = htonl(taosGetTimestampSec()); + taosBuildAuthHeader((uint8_t *)pHeader, pConn->pMsgNode->msgLen - TSDB_AUTH_LEN, pDigest->auth, + pConn->secret); + } + } + } else { + // close the connection + tTrace("%s cid:%d sid:%d id:%s, failed to send msg:%s to %s:%hu pConn:%p", pServer->label, pConn->chann, + pConn->sid, pConn->meterId, taosMsg[pConn->outType], pConn->peerIpstr, pConn->peerPort, pConn); + if (pConn->rspReceived == 0) { + pConn->rspReceived = 1; + + SSchedMsg schedMsg; + schedMsg.fp = taosProcessSchedMsg; + schedMsg.msg = NULL; + schedMsg.ahandle = pConn->ahandle; + schedMsg.thandle = pConn; + taosScheduleTask(pServer->qhandle, &schedMsg); + } + } + } + + pthread_mutex_unlock(&pChann->mutex); + + if (pHeader) { + (*taosSendData[pServer->type])(pConn->peerIp, pConn->peerPort, (char *)pHeader, pConn->pMsgNode->msgLen, + pConn->chandle); + taosTmrReset(taosProcessTaosTimer, tsRpcTimer, pConn, pChann->tmrCtrl, &pConn->pTimer); + } +} + +void taosGetRpcConnInfo(void *thandle, uint32_t *peerId, uint32_t *peerIp, short *peerPort, int *cid, int *sid) { + SRpcConn *pConn = (SRpcConn *)thandle; + + *peerId = pConn->peerId; + *peerIp = pConn->peerIp; + *peerPort = pConn->peerPort; + + *cid = pConn->chann; + *sid = pConn->sid; +} + +int taosGetOutType(void *thandle) { + SRpcConn *pConn = (SRpcConn *)thandle; + if (pConn == NULL) return -1; + + return pConn->outType; +} + +void taosProcessSchedMsg(SSchedMsg *pMsg) { + SIntMsg * pHeader = (SIntMsg *)pMsg->msg; + SRpcConn *pConn = (SRpcConn *)pMsg->thandle; + if (pConn == NULL || pConn->signature != pMsg->thandle || pConn->pServer == NULL) return; + STaosRpc *pRpc = pConn->pServer; + + void *ahandle = (*(pRpc->fp))(pMsg->msg, pMsg->ahandle, pMsg->thandle); + + if (ahandle == NULL || pMsg->msg == NULL) { + taosCloseRpcConn(pConn); + } else { + pConn->ahandle = ahandle; + if (pHeader && ((pHeader->msgType & 1) == 0)) taosProcessResponse(pConn); + } + + if (pMsg->msg) free(pMsg->msg - sizeof(STaosHeader) + sizeof(SIntMsg)); +} + +void taosStopRpcConn(void *thandle) { + SRpcConn * pConn = (SRpcConn *)thandle; + STaosRpc * pServer = pConn->pServer; + SRpcChann *pChann = pServer->channList + pConn->chann; + + tTrace("%s cid:%d sid:%d id:%s, stop the connection pConn:%p", pServer->label, pConn->chann, pConn->sid, + pConn->meterId, pConn); + + pthread_mutex_lock(&pChann->mutex); + + if (pConn->outType) { + pConn->rspReceived = 1; + SSchedMsg schedMsg; + schedMsg.fp = 
taosProcessSchedMsg; + schedMsg.msg = NULL; + schedMsg.ahandle = pConn->ahandle; + schedMsg.thandle = pConn; + pthread_mutex_unlock(&pChann->mutex); + + taosScheduleTask(pServer->qhandle, &schedMsg); + } else { + pthread_mutex_unlock(&pChann->mutex); + taosCloseRpcConn(pConn); + } +} + +int taosAuthenticateMsg(uint8_t *pMsg, int msgLen, uint8_t *pAuth, uint8_t *pKey) { + MD5_CTX context; + int ret = -1; + + MD5Init(&context); + MD5Update(&context, pKey, TSDB_KEY_LEN); + MD5Update(&context, pMsg, msgLen); + MD5Update(&context, pKey, TSDB_KEY_LEN); + MD5Final(&context); + + if (memcmp(context.digest, pAuth, sizeof(context.digest)) == 0) ret = 0; + + return ret; +} + +int taosBuildAuthHeader(uint8_t *pMsg, int msgLen, uint8_t *pAuth, uint8_t *pKey) { + MD5_CTX context; + + MD5Init(&context); + MD5Update(&context, pKey, TSDB_KEY_LEN); + MD5Update(&context, (uint8_t *)pMsg, msgLen); + MD5Update(&context, pKey, TSDB_KEY_LEN); + MD5Final(&context); + + memcpy(pAuth, context.digest, sizeof(context.digest)); + + return 0; +} diff --git a/src/rpc/src/tstring.c b/src/rpc/src/tstring.c new file mode 100644 index 000000000000..f646fcb1f5f3 --- /dev/null +++ b/src/rpc/src/tstring.c @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +char *taosMsg[] = {"null", + "registration", + "registration-rsp", + "submit", + "submit-rsp", + "nw-change", + "nw-change-rsp", + "deliver", + "deliver-rsp", + + "create", + "create-rsp", + "remove", + "remove-rsp", + "vpeers", + "vpeers-rsp", + "free-vnode", + "free-vnode-rsp", + "vpeer-cfg", + "vpeer-cfg-rsp", + "meter-cfg", + "meter-cfg-rsp", + + "vpeer-fwd", + "vpeer-fwd-rsp", + "sync", + "sync-rsp", + + "insert", + "insert-rsp", + "query", + "query-rsp", + "retrieve", + "retrieve-rsp", + + "connect", + "connect-rsp", + "create-acct", + "create-acct-rsp", + "create-user", + "create-user-rsp", + "drop-acct", + "drop-acct-rsp", + "drop-user", + "drop-user-rsp", + "alter-user", + "alter-user-rsp", + "create-mnode", + "create-mnode-rsp", + "drop-mnode", + "drop-mnode-rsp", + "create-dnode", + "create-dnode-rsp", + "drop-dnode", + "drop-dnode-rsp", + "create-db", + "create-db-rsp", + "drop-db", + "drop-db-rsp", + "use-db", + "use-db-rsp", + "create-table", + "create-table-rsp", + "drop-table", + "drop-table-rsp", + "meter-info", + "meter-info-rsp", + "metric-meta", + "metric-meta-rsp", + "show", + "show-rsp", + + "forward", + "forward-rsp", + + "cfg-dnode", + "cfg-dnode-rsp", + "cfg-mnode", + "cfg-mnode-rsp", + + "kill-query", + "kill-query-rsp", + "kill-stream", + "kill-stream-rsp", + "kill-connection", + "kill-connectoin-rsp", // 78 + "alter-stream", + "alter-stream-rsp", + "alter-table", + "alter-table-rsp", + + "", + "", + "", + "", + "", + "", + "", + "", + + "heart-beat", // 91 + "heart-beat-rsp", + "status", + "status-rsp", + "grant", + "grant-rsp", + "alter-acct", + "alter-acct-rsp", + "invalid"}; + +char *tsError[] = {"success", + "in progress", + "", + "", + "", + + "last session not finished", // 5 + "invalid session ID", + "invalid tran ID", + "invalid msg type", + "alredy processed", + "authentication failure", // 10 + "wrong msg size", + "unexpected response", + "invalid response type", + "no resource", + "invalid time stamp", // 15 + "mismatched meter ID", + "transcation not finished", + "not online", + "send failed", + "not active session", // 20 + "insert failed", + "App error", + "invalid IE", + "invalid value", + "service not available", // 25 + "already there", + "invalid meter ID", + "invalid SQL", + "failed to connect to server", + "invalid msg len", // 30 + "invalid DB", + "invalid table", + "DB already there", + "table already there", + "invalid user name", // 35 + "invalid acct name", + "invalid password", + "DB not selected", + "memory corrupted", + "user name exists", // 40 + "not authorized", + "login disconnected, login again", + "mgmt master node not available", + "not configured", + "invalid option", // 45 + "node offline", + "sync required", + "more dnodes are needed", + "node in unsynced state", + "too slow", // 50 + "others", + "can't remove dnode which is master", + "wrong schema", + "no results", + "num of users execeed maxUsers", + "num of databases execeed maxDbs", + "num of tables execeed maxTables", + "num of dnodes execeed maxDnodes", + "num of accounts execeed maxAccts", + "accout name exists", // 60 + "dnode ip exists", + "sdb error", + "metric meta expired", + "not ready", + "too many sessions on server", // 65 + "too many sessions from app", + "session to dest is already there", + "query list not there, please show again", + "server out of memory", + "invalid query handle", // 70 + "tables related to metric exist", + "can't drop monitor database or tables", + "commit log init failed", + "vgroup init failed", + "data is already imported", 
// 75 + "not supported operation", + "invalid query id string", + "invalid stream id string", + "invalid connection string", + "dnode not balanced", // 80 + "client out of memory", + "data value overflow", + "query cancelled", + "grant timeseries limited", // 84 + "grant expired", // 85 + "client no disk space", + "DB file corrupted", + "version of client and server not match", + "invalid account parameter", + "no enough available time series", + "storage credit is used up", + "query credit is used up", // 92 + "grant database limited", + "grant user limited", + "grant connection limited", + "grant stream limited", + "grant writing speed limited", + "grant storage limited", + "grant query time limited", // 99 + "grant account limited", + "grant dnode limited", + "grant cpu core limited", // 102 + "session not ready", + "batch size too big", + "timestamp out of range", + "invalid query message" +}; diff --git a/src/rpc/src/ttcpclient.c b/src/rpc/src/ttcpclient.c new file mode 100644 index 000000000000..ba758dbf7f25 --- /dev/null +++ b/src/rpc/src/ttcpclient.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tlog.h" +#include "tlog.h" +#include "tsocket.h" +#include "ttcpclient.h" +#include "tutil.h" + +typedef struct _tcp_fd { + int fd; // TCP socket FD + void * thandle; + uint32_t ip; + char ipstr[20]; + short port; + struct _tcp_client *pTcp; + struct _tcp_fd * prev, *next; +} STcpFd; + +typedef struct _tcp_client { + pthread_t thread; + STcpFd * pHead; + pthread_mutex_t mutex; + pthread_cond_t fdReady; + int pollFd; + int numOfFds; + char label[12]; + char ipstr[20]; + void * shandle; // handle passed by upper layer during server initialization + void *(*processData)(char *data, int dataLen, unsigned int ip, short port, void *shandle, void *thandle, + void *chandle); + // char buffer[128000]; +} STcpClient; + +#define maxTcpEvents 100 + +static void taosCleanUpTcpFdObj(STcpFd *pFdObj) { + STcpClient *pTcp; + + if (pFdObj == NULL) return; + + pTcp = pFdObj->pTcp; + if (pTcp == NULL) { + tError("double free TcpFdObj!!!!"); + return; + } + + epoll_ctl(pTcp->pollFd, EPOLL_CTL_DEL, pFdObj->fd, NULL); + close(pFdObj->fd); + + pthread_mutex_lock(&pTcp->mutex); + + pTcp->numOfFds--; + + if (pTcp->numOfFds < 0) tError("%s number of TCP FDs shall never be negative", pTcp->label); + + // remove from the FdObject list + + if (pFdObj->prev) { + (pFdObj->prev)->next = pFdObj->next; + } else { + pTcp->pHead = pFdObj->next; + } + + if (pFdObj->next) { + (pFdObj->next)->prev = pFdObj->prev; + } + + pthread_mutex_unlock(&pTcp->mutex); + + // notify the upper layer to clean the associated context + if (pFdObj->thandle) (*(pTcp->processData))(NULL, 0, 0, 0, pTcp->shandle, pFdObj->thandle, NULL); + + tTrace("%s TCP FD is cleaned up, numOfFds:%d", pTcp->label, 
pTcp->numOfFds); + + memset(pFdObj, 0, sizeof(STcpFd)); + + tfree(pFdObj); +} + +void taosCleanUpTcpClient(void *chandle) { + STcpClient *pTcp = (STcpClient *)chandle; + if (pTcp == NULL) return; + + while (pTcp->pHead) { + taosCleanUpTcpFdObj(pTcp->pHead); + pTcp->pHead = pTcp->pHead->next; + } + + close(pTcp->pollFd); + + pthread_cancel(pTcp->thread); + pthread_join(pTcp->thread, NULL); + + // tTrace (":%s, all connections are cleaned up", pTcp->label); + + tfree(pTcp); +} + +static void *taosReadTcpData(void *param) { + STcpClient * pTcp = (STcpClient *)param; + int i, fdNum; + STcpFd * pFdObj; + struct epoll_event events[maxTcpEvents]; + + while (1) { + pthread_mutex_lock(&pTcp->mutex); + if (pTcp->numOfFds < 1) pthread_cond_wait(&pTcp->fdReady, &pTcp->mutex); + pthread_mutex_unlock(&pTcp->mutex); + + fdNum = epoll_wait(pTcp->pollFd, events, maxTcpEvents, -1); + if (fdNum < 0) continue; + + for (i = 0; i < fdNum; ++i) { + pFdObj = events[i].data.ptr; + + if (events[i].events & EPOLLERR) { + tTrace("%s TCP error happened on FD\n", pTcp->label); + taosCleanUpTcpFdObj(pFdObj); + continue; + } + + if (events[i].events & EPOLLHUP) { + tTrace("%s TCP FD hang up\n", pTcp->label); + taosCleanUpTcpFdObj(pFdObj); + continue; + } + + void *buffer = malloc(1024); + int headLen = taosReadMsg(pFdObj->fd, buffer, sizeof(STaosHeader)); + if (headLen != sizeof(STaosHeader)) { + tError("%s read error, headLen:%d", pTcp->label, headLen); + taosCleanUpTcpFdObj(pFdObj); + continue; + } + + int dataLen = (int32_t)htonl((uint32_t)((STaosHeader *)buffer)->msgLen); + if (dataLen > 1024) buffer = realloc(buffer, (size_t)dataLen); + + int leftLen = dataLen - headLen; + int retLen = taosReadMsg(pFdObj->fd, buffer + headLen, leftLen); + + //tTrace("%s TCP data is received, ip:%s port:%u len:%d", pTcp->label, pFdObj->ipstr, pFdObj->port, dataLen); + + if (leftLen != retLen) { + tError("%s read error, leftLen:%d retLen:%d", pTcp->label, leftLen, retLen); + taosCleanUpTcpFdObj(pFdObj); + continue; + } + + pFdObj->thandle = + (*(pTcp->processData))(buffer, dataLen, pFdObj->ip, pFdObj->port, pTcp->shandle, pFdObj->thandle, pFdObj); + + if (pFdObj->thandle == NULL) taosCleanUpTcpFdObj(pFdObj); + } + } + + return NULL; +} + +void *taosInitTcpClient(char *ip, short port, char *label, int num, void *fp, void *shandle) { + STcpClient * pTcp; + pthread_attr_t thattr; + + pTcp = (STcpClient *)malloc(sizeof(STcpClient)); + memset(pTcp, 0, sizeof(STcpClient)); + strcpy(pTcp->label, label); + strcpy(pTcp->ipstr, ip); + pTcp->shandle = shandle; + + if (pthread_mutex_init(&(pTcp->mutex), NULL) < 0) { + tError("%s failed to init TCP mutex, reason:%s", label, strerror(errno)); + return NULL; + } + + if (pthread_cond_init(&(pTcp->fdReady), NULL) != 0) { + tError("%s init TCP condition variable failed, reason:%s\n", label, strerror(errno)); + return NULL; + } + + pTcp->pollFd = epoll_create(10); // size does not matter + if (pTcp->pollFd < 0) { + tError("%s failed to create TCP epoll", label); + return NULL; + } + + pTcp->processData = fp; + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(&(pTcp->thread), &thattr, taosReadTcpData, (void *)(pTcp)) != 0) { + tError("%s failed to create TCP read data thread, reason:%s", label, strerror(errno)); + return NULL; + } + + tTrace("%s TCP client is initialized, ip:%s port:%u", label, ip, port); + + return pTcp; +} + +void taosCloseTcpClientConnection(void *chandle) { + STcpFd *pFdObj = (STcpFd *)chandle; + + if (pFdObj == NULL) 
return; + + taosCleanUpTcpFdObj(pFdObj); +} + +void *taosOpenTcpClientConnection(void *shandle, void *thandle, char *ip, short port) { + STcpClient * pTcp = (STcpClient *)shandle; + STcpFd * pFdObj; + struct epoll_event event; + struct in_addr destIp; + int fd; + + /* + if ( (strcmp(ip, "127.0.0.1") == 0 ) || (strcmp(ip, "localhost") == 0 ) ) { + fd = taosOpenUDClientSocket(ip, port); + } else { + fd = taosOpenTcpClientSocket(ip, port, pTcp->ipstr); + } + */ + + fd = taosOpenTcpClientSocket(ip, port, pTcp->ipstr); + + if (fd <= 0) return NULL; + + pFdObj = (STcpFd *)malloc(sizeof(STcpFd)); + if (pFdObj == NULL) { + tError("%s no enough resource to allocate TCP FD IDs", pTcp->label); + tclose(fd); + return NULL; + } + + memset(pFdObj, 0, sizeof(STcpFd)); + pFdObj->fd = fd; + strcpy(pFdObj->ipstr, ip); + inet_aton(ip, &destIp); + pFdObj->ip = destIp.s_addr; + pFdObj->port = port; + pFdObj->pTcp = pTcp; + pFdObj->thandle = thandle; + +// add this new FD into epoll +#ifndef _NINGSI_VERSION + event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP; +#else + event.events = EPOLLIN | EPOLLPRI; +#endif + event.data.ptr = pFdObj; + if (epoll_ctl(pTcp->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { + tError("%s failed to add TCP FD for epoll, error:%s", pTcp->label, strerror(errno)); + tfree(pFdObj); + tclose(fd); + return NULL; + } + + // notify the data process, add into the FdObj list + pthread_mutex_lock(&(pTcp->mutex)); + + pFdObj->next = pTcp->pHead; + + if (pTcp->pHead) (pTcp->pHead)->prev = pFdObj; + + pTcp->pHead = pFdObj; + + pTcp->numOfFds++; + pthread_cond_signal(&pTcp->fdReady); + + pthread_mutex_unlock(&(pTcp->mutex)); + + tTrace("%s TCP connection to ip:%s port:%u is created, numOfFds:%d", pTcp->label, ip, port, pTcp->numOfFds); + + return pFdObj; +} + +int taosSendTcpClientData(uint32_t ip, short port, char *data, int len, void *chandle) { + STcpFd *pFdObj = (STcpFd *)chandle; + + if (chandle == NULL) return -1; + + return (int)send(pFdObj->fd, data, (size_t)len, 0); +} diff --git a/src/rpc/src/ttcpserver.c b/src/rpc/src/ttcpserver.c new file mode 100644 index 000000000000..872d362025f8 --- /dev/null +++ b/src/rpc/src/ttcpserver.c @@ -0,0 +1,513 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tlog.h" +#include "tlog.h" +#include "tsocket.h" +#include "ttcpserver.h" +#include "tutil.h" + +#define TAOS_IPv4ADDR_LEN 16 + +typedef struct _fd_obj { + int fd; // TCP socket FD + void * thandle; // handle from upper layer, like TAOS + char ipstr[TAOS_IPv4ADDR_LEN]; + unsigned int ip; + unsigned short port; + struct _thread_obj *pThreadObj; + struct _fd_obj * prev, *next; +} SFdObj; + +typedef struct _thread_obj { + pthread_t thread; + SFdObj * pHead; + pthread_mutex_t threadMutex; + pthread_cond_t fdReady; + int pollFd; + int numOfFds; + int threadId; + char label[12]; + // char buffer[128000]; // buffer to receive data + void *shandle; // handle passed by upper layer during server initialization + void *(*processData)(char *data, int dataLen, unsigned int ip, short port, void *shandle, void *thandle, + void *chandle); +} SThreadObj; + +typedef struct { + char ip[40]; + short port; + char label[12]; + int numOfThreads; + void * shandle; + SThreadObj *pThreadObj; + pthread_t thread; +} SServerObj; + +static void taosCleanUpFdObj(SFdObj *pFdObj) { + SThreadObj *pThreadObj; + + if (pFdObj == NULL) return; + + pThreadObj = pFdObj->pThreadObj; + if (pThreadObj == NULL) { + tError("FdObj double clean up!!!"); + return; + } + + epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_DEL, pFdObj->fd, NULL); + close(pFdObj->fd); + + pthread_mutex_lock(&pThreadObj->threadMutex); + + pThreadObj->numOfFds--; + + if (pThreadObj->numOfFds < 0) + tError("%s TCP thread:%d, number of FDs shall never be negative", pThreadObj->label, pThreadObj->threadId); + + // remove from the FdObject list + + if (pFdObj->prev) { + (pFdObj->prev)->next = pFdObj->next; + } else { + pThreadObj->pHead = pFdObj->next; + } + + if (pFdObj->next) { + (pFdObj->next)->prev = pFdObj->prev; + } + + pthread_mutex_unlock(&pThreadObj->threadMutex); + + // notify the upper layer, so it will clean the associated context + if (pFdObj->thandle) (*(pThreadObj->processData))(NULL, 0, 0, 0, pThreadObj->shandle, pFdObj->thandle, NULL); + + tTrace("%s TCP thread:%d, FD is cleaned up, numOfFds:%d", pThreadObj->label, pThreadObj->threadId, + pThreadObj->numOfFds); + + memset(pFdObj, 0, sizeof(SFdObj)); + + tfree(pFdObj); +} + +void taosCloseTcpServerConnection(void *chandle) { + SFdObj *pFdObj = (SFdObj *)chandle; + + if (pFdObj == NULL) return; + + taosCleanUpFdObj(pFdObj); +} + +void taosCleanUpTcpServer(void *handle) { + int i; + SThreadObj *pThreadObj; + SServerObj *pServerObj = (SServerObj *)handle; + + if (pServerObj == NULL) return; + + pthread_cancel(pServerObj->thread); + pthread_join(pServerObj->thread, NULL); + + for (i = 0; i < pServerObj->numOfThreads; ++i) { + pThreadObj = pServerObj->pThreadObj + i; + + while (pThreadObj->pHead) { + taosCleanUpFdObj(pThreadObj->pHead); + pThreadObj->pHead = pThreadObj->pHead; + } + + close(pThreadObj->pollFd); + pthread_cancel(pThreadObj->thread); + pthread_join(pThreadObj->thread, NULL); + pthread_cond_destroy(&(pThreadObj->fdReady)); + pthread_mutex_destroy(&(pThreadObj->threadMutex)); + } + + tfree(pServerObj->pThreadObj); + tTrace("TCP:%s, TCP server is cleaned up", pServerObj->label); + + tfree(pServerObj); +} + +#define maxEvents 10 + +static void taosProcessTcpData(void *param) { + SThreadObj * pThreadObj; + int i, fdNum; + SFdObj * pFdObj; + struct epoll_event events[maxEvents]; + + pThreadObj = 
(SThreadObj *)param; + + while (1) { + pthread_mutex_lock(&pThreadObj->threadMutex); + if (pThreadObj->numOfFds < 1) { + pthread_cond_wait(&pThreadObj->fdReady, &pThreadObj->threadMutex); + } + pthread_mutex_unlock(&pThreadObj->threadMutex); + + fdNum = epoll_wait(pThreadObj->pollFd, events, maxEvents, -1); + if (fdNum < 0) continue; + + for (i = 0; i < fdNum; ++i) { + pFdObj = events[i].data.ptr; + + if (events[i].events & EPOLLERR) { + tTrace("%s TCP thread:%d, error happened on FD", pThreadObj->label, pThreadObj->threadId); + taosCleanUpFdObj(pFdObj); + continue; + } + + if (events[i].events & EPOLLHUP) { + tTrace("%s TCP thread:%d, FD hang up", pThreadObj->label, pThreadObj->threadId); + taosCleanUpFdObj(pFdObj); + continue; + } + + void *buffer = malloc(1024); + int headLen = taosReadMsg(pFdObj->fd, buffer, sizeof(STaosHeader)); + if (headLen != sizeof(STaosHeader)) { + tError("%s read error, headLen:%d", pThreadObj->label, headLen); + taosCleanUpFdObj(pFdObj); + continue; + } + + int dataLen = (int32_t)htonl((uint32_t)((STaosHeader *)buffer)->msgLen); + if (dataLen > 1024) buffer = realloc(buffer, (size_t)dataLen); + + int leftLen = dataLen - headLen; + int retLen = taosReadMsg(pFdObj->fd, buffer + headLen, leftLen); + + // tTrace("%s TCP data is received, ip:%s port:%u len:%d", + // pThreadObj->label, pFdObj->ipstr, pFdObj->port, dataLen); + + if (leftLen != retLen) { + tError("%s read error, leftLen:%d retLen:%d", pThreadObj->label, leftLen, retLen); + taosCleanUpFdObj(pFdObj); + continue; + } + + pFdObj->thandle = (*(pThreadObj->processData))(buffer, dataLen, pFdObj->ip, (int16_t)pFdObj->port, + pThreadObj->shandle, pFdObj->thandle, pFdObj); + + if (pFdObj->thandle == NULL) taosCleanUpFdObj(pFdObj); + } + } +} + +void taosAcceptTcpConnection(void *arg) { + int connFd = -1; + struct sockaddr_in clientAddr; + int sockFd; + int threadId = 0; + SThreadObj * pThreadObj; + SServerObj * pServerObj; + SFdObj * pFdObj; + struct epoll_event event; + + pServerObj = (SServerObj *)arg; + + sockFd = taosOpenTcpServerSocket(pServerObj->ip, pServerObj->port); + + if (sockFd < 0) { + tError("%s failed to open TCP socket, ip:%s, port:%u", pServerObj->label, pServerObj->ip, pServerObj->port); + return; + } else { + tTrace("%s TCP server is ready, ip:%s, port:%u", pServerObj->label, pServerObj->ip, pServerObj->port); + } + + while (1) { + socklen_t addrlen = sizeof(clientAddr); + connFd = accept(sockFd, (struct sockaddr *)&clientAddr, &addrlen); + + if (connFd < 0) { + tError("%s TCP accept failure, errno:%d, reason:%s", pServerObj->label, errno, strerror(errno)); + continue; + } + + tTrace("%s TCP connection from ip:%s port:%u", pServerObj->label, inet_ntoa(clientAddr.sin_addr), + htons(clientAddr.sin_port)); + taosKeepTcpAlive(connFd); + + // pick up the thread to handle this connection + pThreadObj = pServerObj->pThreadObj + threadId; + + pFdObj = (SFdObj *)malloc(sizeof(SFdObj)); + if (pFdObj == NULL) { + tError("%s no enough resource to allocate TCP FD IDs", pServerObj->label); + close(connFd); + continue; + } + + memset(pFdObj, 0, sizeof(SFdObj)); + pFdObj->fd = connFd; + strcpy(pFdObj->ipstr, inet_ntoa(clientAddr.sin_addr)); + pFdObj->ip = clientAddr.sin_addr.s_addr; + pFdObj->port = htons(clientAddr.sin_port); + pFdObj->pThreadObj = pThreadObj; + +// add this new FD into epoll +#ifndef _NINGSI_VERSION + event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP; +#else + event.events = EPOLLIN | EPOLLPRI; +#endif + event.data.ptr = pFdObj; + if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, connFd, 
&event) < 0) { + tError("%s failed to add TCP FD for epoll, error:%s", pServerObj->label, strerror(errno)); + tfree(pFdObj); + close(connFd); + continue; + } + + // notify the data process, add into the FdObj list + pthread_mutex_lock(&(pThreadObj->threadMutex)); + + pFdObj->next = pThreadObj->pHead; + + if (pThreadObj->pHead) (pThreadObj->pHead)->prev = pFdObj; + + pThreadObj->pHead = pFdObj; + + pThreadObj->numOfFds++; + pthread_cond_signal(&pThreadObj->fdReady); + + pthread_mutex_unlock(&(pThreadObj->threadMutex)); + + tTrace("%s TCP thread:%d, a new connection, ip:%s port:%u, numOfFds:%d", pServerObj->label, pThreadObj->threadId, + pFdObj->ipstr, pFdObj->port, pThreadObj->numOfFds); + + // pick up next thread for next connection + threadId++; + threadId = threadId % pServerObj->numOfThreads; + } +} + +void taosAcceptUDConnection(void *arg) { + int connFd = -1; + int sockFd; + int threadId = 0; + SThreadObj * pThreadObj; + SServerObj * pServerObj; + SFdObj * pFdObj; + struct epoll_event event; + + pServerObj = (SServerObj *)arg; + sockFd = taosOpenUDServerSocket(pServerObj->ip, pServerObj->port); + + if (sockFd < 0) { + tError("%s failed to open UD socket, ip:%s, port:%u", pServerObj->label, pServerObj->ip, pServerObj->port); + return; + } else { + tTrace("%s UD server is ready, ip:%s, port:%u", pServerObj->label, pServerObj->ip, pServerObj->port); + } + + while (1) { + connFd = accept(sockFd, NULL, NULL); + + if (connFd < 0) { + tError("%s UD accept failure, errno:%d, reason:%s", pServerObj->label, errno, strerror(errno)); + continue; + } + + // pick up the thread to handle this connection + pThreadObj = pServerObj->pThreadObj + threadId; + + pFdObj = (SFdObj *)malloc(sizeof(SFdObj)); + if (pFdObj == NULL) { + tError("%s no enough resource to allocate TCP FD IDs", pServerObj->label); + close(connFd); + continue; + } + + memset(pFdObj, 0, sizeof(SFdObj)); + pFdObj->fd = connFd; + pFdObj->pThreadObj = pThreadObj; + +// add this new FD into epoll +#ifndef _NINGSI_VERSION + event.events = EPOLLIN | EPOLLPRI | EPOLLWAKEUP; +#else + event.events = EPOLLIN | EPOLLPRI; +#endif + event.data.ptr = pFdObj; + if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, connFd, &event) < 0) { + tError("%s failed to add UD FD for epoll, error:%s", pServerObj->label, strerror(errno)); + tfree(pFdObj); + close(connFd); + continue; + } + + // notify the data process, add into the FdObj list + pthread_mutex_lock(&(pThreadObj->threadMutex)); + + pFdObj->next = pThreadObj->pHead; + + if (pThreadObj->pHead) (pThreadObj->pHead)->prev = pFdObj; + + pThreadObj->pHead = pFdObj; + + pThreadObj->numOfFds++; + pthread_cond_signal(&pThreadObj->fdReady); + + pthread_mutex_unlock(&(pThreadObj->threadMutex)); + + tTrace("%s UD thread:%d, a new connection, numOfFds:%d", pServerObj->label, pThreadObj->threadId, + pThreadObj->numOfFds); + + // pick up next thread for next connection + threadId++; + threadId = threadId % pServerObj->numOfThreads; + } +} + +void *taosInitTcpServer(char *ip, short port, char *label, int numOfThreads, void *fp, void *shandle) { + int i; + SServerObj * pServerObj; + pthread_attr_t thattr; + SThreadObj * pThreadObj; + + pServerObj = (SServerObj *)malloc(sizeof(SServerObj)); + strcpy(pServerObj->ip, ip); + pServerObj->port = port; + strcpy(pServerObj->label, label); + pServerObj->numOfThreads = numOfThreads; + + pServerObj->pThreadObj = (SThreadObj *)malloc(sizeof(SThreadObj) * (size_t)numOfThreads); + if (pServerObj->pThreadObj == NULL) { + tError("TCP:%s no enough memory", label); + return NULL; + } + 
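+  // Each worker thread owns its own FD list and epoll instance; the accept
+  // thread below hands new connections to the workers in round-robin order.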
memset(pServerObj->pThreadObj, 0, sizeof(SThreadObj) * (size_t)numOfThreads); + + pThreadObj = pServerObj->pThreadObj; + for (i = 0; i < numOfThreads; ++i) { + pThreadObj->processData = fp; + strcpy(pThreadObj->label, label); + pThreadObj->shandle = shandle; + + if (pthread_mutex_init(&(pThreadObj->threadMutex), NULL) < 0) { + tError("%s failed to init TCP process data mutex, reason:%s", label, strerror(errno)); + return NULL; + } + + if (pthread_cond_init(&(pThreadObj->fdReady), NULL) != 0) { + tError("%s init TCP condition variable failed, reason:%s\n", label, strerror(errno)); + return NULL; + } + + pThreadObj->pollFd = epoll_create(10); // size does not matter + if (pThreadObj->pollFd < 0) { + tError("%s failed to create TCP epoll", label); + return NULL; + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(&(pThreadObj->thread), &thattr, (void *)taosProcessTcpData, (void *)(pThreadObj)) != 0) { + tError("%s failed to create TCP process data thread, reason:%s", label, strerror(errno)); + return NULL; + } + + pThreadObj->threadId = i; + pThreadObj++; + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(&(pServerObj->thread), &thattr, (void *)taosAcceptTcpConnection, (void *)(pServerObj)) != 0) { + tError("%s failed to create TCP accept thread, reason:%s", label, strerror(errno)); + return NULL; + } + + /* + if ( pthread_create(&(pServerObj->thread), &thattr, + (void*)taosAcceptUDConnection, (void *)(pServerObj)) != 0 ) { + tError("%s failed to create UD accept thread, reason:%s", label, + strerror(errno)); + return NULL; + } + */ + pthread_attr_destroy(&thattr); + tTrace("%s TCP server is initialized, ip:%s port:%u numOfThreads:%d", label, ip, port, numOfThreads); + + return (void *)pServerObj; +} + +void taosListTcpConnection(void *handle, char *buffer) { + SServerObj *pServerObj; + SThreadObj *pThreadObj; + SFdObj * pFdObj; + int i, numOfFds, numOfConns; + char * msg; + + pServerObj = (SServerObj *)handle; + buffer[0] = 0; + msg = buffer; + numOfConns = 0; + + pThreadObj = pServerObj->pThreadObj; + + for (i = 0; i < pServerObj->numOfThreads; ++i) { + numOfFds = 0; + sprintf(msg, "TCP:%s Thread:%d number of connections:%d\n", pServerObj->label, pThreadObj->threadId, + pThreadObj->numOfFds); + msg = msg + strlen(msg); + pFdObj = pThreadObj->pHead; + while (pFdObj) { + sprintf(" ip:%s port:%u\n", pFdObj->ipstr, pFdObj->port); + msg = msg + strlen(msg); + numOfFds++; + numOfConns++; + pFdObj = pFdObj->next; + } + + if (numOfFds != pThreadObj->numOfFds) + tError("TCP:%s thread:%d BIG error, numOfFds:%d actual numOfFds:%d", pServerObj->label, pThreadObj->threadId, + pThreadObj->numOfFds, numOfFds); + + pThreadObj++; + } + + sprintf(msg, "TCP:%s total connections:%d\n", pServerObj->label, numOfConns); + + return; +} + +int taosSendTcpServerData(uint32_t ip, short port, char *data, int len, void *chandle) { + SFdObj *pFdObj = (SFdObj *)chandle; + + if (chandle == NULL) return -1; + + return (int)send(pFdObj->fd, data, (size_t)len, 0); +} diff --git a/src/rpc/src/tudp.c b/src/rpc/src/tudp.c new file mode 100644 index 000000000000..6ef2f431322c --- /dev/null +++ b/src/rpc/src/tudp.c @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "thash.h" +#include "thaship.h" +#include "tlog.h" +#include "tlog.h" +#include "tsocket.h" +#include "tsystem.h" +#include "ttimer.h" +#include "tudp.h" +#include "tutil.h" + +#define RPC_MAX_UDP_CONNS 256 +#define RPC_MAX_UDP_PKTS 1000 +#define RPC_UDP_BUF_TIME 5 // mseconds +#define RPC_MAX_UDP_SIZE 65480 + +int tsUdpDelay = 0; + +typedef struct { + void * signature; + int index; + int fd; + short port; // peer port + short localPort; // local port + char label[12]; // copy from udpConnSet; + pthread_t thread; + pthread_mutex_t mutex; + void * tmrCtrl; // copy from UdpConnSet; + void * hash; + void * shandle; // handle passed by upper layer during server initialization + void * pSet; + void *(*processData)(char *data, int dataLen, unsigned int ip, short port, void *shandle, void *thandle, + void *chandle); + char buffer[RPC_MAX_UDP_SIZE]; // buffer to receive data +} SUdpConn; + +typedef struct { + int index; + int server; + char ip[16]; // local IP + short port; // local Port + void * shandle; // handle passed by upper layer during server initialization + int threads; + char label[12]; + void * tmrCtrl; + pthread_t tcpThread; + int tcpFd; + void *(*fp)(char *data, int dataLen, uint32_t ip, short port, void *shandle, void *thandle, void *chandle); + SUdpConn udpConn[]; +} SUdpConnSet; + +typedef struct { + void * signature; + uint32_t ip; // dest IP + short port; // dest Port + SUdpConn * pConn; + struct sockaddr_in destAdd; + void * msgHdr; + int totalLen; + void * timer; + int emptyNum; +} SUdpBuf; + +typedef struct { + uint64_t handle; + uint16_t port; + int32_t msgLen; +} SPacketInfo; + +typedef struct { + int fd; + uint32_t ip; + uint16_t port; + SUdpConnSet *pSet; +} STransfer; + +typedef struct { + void * pTimer; + SUdpConnSet *pSet; + SUdpConn * pConn; + int dataLen; + uint32_t ip; + uint16_t port; + char data[96]; +} SMonitor; + +typedef struct { + uint64_t handle; + uint64_t hash; +} SHandleViaTcp; + +bool taosCheckHandleViaTcpValid(SHandleViaTcp *handleViaTcp) { + return handleViaTcp->hash == taosHashUInt64(handleViaTcp->handle); +} + +void taosInitHandleViaTcp(SHandleViaTcp *handleViaTcp, uint64_t handle) { + handleViaTcp->handle = handle; + handleViaTcp->hash = taosHashUInt64(handleViaTcp->handle); +} + +void taosProcessMonitorTimer(void *param, void *tmrId) { + SMonitor *pMonitor = (SMonitor *)param; + if (pMonitor->pTimer != tmrId) return; + + SUdpConnSet *pSet = pMonitor->pSet; + pMonitor->pTimer = NULL; + + if (pSet) { + char *data = malloc((size_t)pMonitor->dataLen); + memcpy(data, pMonitor->data, (size_t)pMonitor->dataLen); + + tTrace("%s monitor timer is expired, update the link status", pSet->label); + (*pSet->fp)(data, pMonitor->dataLen, pMonitor->ip, 0, pSet->shandle, NULL, NULL); + taosTmrReset(taosProcessMonitorTimer, 200, pMonitor, pSet->tmrCtrl, 
&pMonitor->pTimer); + } + + if (pMonitor->pSet == NULL) { + taosTmrStopA(&pMonitor->pTimer); + free(pMonitor); + } +} + +void *taosReadTcpData(void *argv) { + SMonitor * pMonitor = (SMonitor *)argv; + STaosHeader *pHead = (STaosHeader *)pMonitor->data; + SPacketInfo *pInfo = (SPacketInfo *)pHead->content; + SUdpConnSet *pSet = pMonitor->pSet; + int retLen, fd; + char ipstr[64]; + + pInfo->msgLen = (int32_t)htonl((uint32_t)pInfo->msgLen); + + tinet_ntoa(ipstr, pMonitor->ip); + tTrace("%s receive packet via TCP:%s:%d, msgLen:%d, handle:0x%x, source:0x%08x dest:0x%08x tranId:%d", + pSet->label, ipstr, pInfo->port, pInfo->msgLen, pInfo->handle, pHead->sourceId, pHead->destId, pHead->tranId); + + fd = taosOpenTcpClientSocket(ipstr, (int16_t)pInfo->port, tsLocalIp); + if (fd < 0) { + tError("%s failed to open TCP client socket ip:%s:%d", pSet->label, ipstr, pInfo->port); + pMonitor->pSet = NULL; + return NULL; + } + + SHandleViaTcp handleViaTcp; + taosInitHandleViaTcp(&handleViaTcp, pInfo->handle); + retLen = (int)taosWriteSocket(fd, (char *)&handleViaTcp, sizeof(SHandleViaTcp)); + + if (retLen != (int)sizeof(SHandleViaTcp)) { + tError("%s failed to send handle:0x%x to server, retLen:%d", pSet->label, pInfo->handle, retLen); + pMonitor->pSet = NULL; + } else { + tTrace("%s handle:0x%x is sent to server", pSet->label, pInfo->handle); + char *buffer = malloc((size_t)pInfo->msgLen); + retLen = taosReadMsg(fd, buffer, pInfo->msgLen); + pMonitor->pSet = NULL; + + if (retLen != pInfo->msgLen) { + tError("%s failed to read data from server, msgLen:%d retLen:%d", pSet->label, pInfo->msgLen, retLen); + } else { + (*pSet->fp)(buffer, pInfo->msgLen, pMonitor->ip, (int16_t)pInfo->port, pSet->shandle, NULL, pMonitor->pConn); + } + } + + taosCloseTcpSocket(fd); + + return NULL; +} + +int taosReceivePacketViaTcp(uint32_t ip, STaosHeader *pHead, SUdpConn *pConn) { + SUdpConnSet * pSet = pConn->pSet; + SPacketInfo * pInfo = (SPacketInfo *)pHead->content; + int code = 0; + pthread_attr_t thattr; + pthread_t thread; + + tTrace("%s receive packet via TCP, handle:0x%x, source:0x%08x dest:0x%08x tranId:%d", + pSet->label, pInfo->handle, pHead->sourceId, pHead->destId, pHead->tranId); + + SMonitor *pMonitor = (SMonitor *)calloc(1, sizeof(SMonitor)); + pMonitor->dataLen = sizeof(STaosHeader) + sizeof(SPacketInfo); + memcpy(pMonitor->data, pHead, (size_t)pMonitor->dataLen); + pMonitor->pSet = pSet; + pMonitor->ip = ip; + pMonitor->port = pInfo->port; + pMonitor->pConn = pConn; + taosTmrReset(taosProcessMonitorTimer, 0, pMonitor, pSet->tmrCtrl, &pMonitor->pTimer); + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); + code = pthread_create(&(thread), &thattr, taosReadTcpData, (void *)pMonitor); + if (code < 0) { + tTrace("%s faile to create thread to read tcp data, reason:%s", pSet->label, strerror(errno)); + } + + return code; +} + +void *taosRecvUdpData(void *param) { + struct sockaddr_in sourceAdd; + unsigned int addLen, dataLen; + SUdpConn * pConn = (SUdpConn *)param; + short port; + int minSize = sizeof(STaosHeader); + + memset(&sourceAdd, 0, sizeof(sourceAdd)); + addLen = sizeof(sourceAdd); + tTrace("%s UDP thread is created, index:%d", pConn->label, pConn->index); + + while (1) { + dataLen = + (uint32_t)recvfrom(pConn->fd, pConn->buffer, sizeof(pConn->buffer), 0, (struct sockaddr *)&sourceAdd, &addLen); + tTrace("%s msg is recv from 0x%x:%hu len:%d", pConn->label, sourceAdd.sin_addr.s_addr, ntohs(sourceAdd.sin_port), + dataLen); + + if (dataLen < sizeof(STaosHeader)) { + 
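+      // A datagram shorter than one STaosHeader cannot carry a complete message,
+      // so it is logged and skipped. Note that recvfrom's return value is cast to
+      // an unsigned type above, so a -1 error return would not land in this branch.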
tError("%s recvfrom failed, reason:%s\n", pConn->label, strerror(errno)); + continue; + } + + port = (int16_t)ntohs(sourceAdd.sin_port); + + int processedLen = 0, leftLen = 0; + int msgLen = 0; + int count = 0; + char *msg = pConn->buffer; + while (processedLen < (int)dataLen) { + leftLen = dataLen - processedLen; + STaosHeader *pHead = (STaosHeader *)msg; + msgLen = (int32_t)htonl((uint32_t)pHead->msgLen); + if (leftLen < minSize || msgLen > leftLen || msgLen < minSize) { + tError("%s msg is messed up, dataLen:%d processedLen:%d count:%d msgLen:%d", + pConn->label, dataLen, processedLen, count, msgLen); + break; + } + + if (pHead->tcp == 1) { + taosReceivePacketViaTcp(sourceAdd.sin_addr.s_addr, (STaosHeader *)msg, pConn); + } else { + char *data = malloc((size_t)msgLen); + memcpy(data, msg, (size_t)msgLen); + (*(pConn->processData))(data, msgLen, sourceAdd.sin_addr.s_addr, port, pConn->shandle, NULL, pConn); + } + + processedLen += msgLen; + msg += msgLen; + count++; + } + + // tTrace("%s %d UDP packets are received together", pConn->label, count); + } + + return NULL; +} + +void *taosTransferDataViaTcp(void *argv) { + STransfer * pTransfer = (STransfer *)argv; + int connFd = pTransfer->fd; + int msgLen, retLen, leftLen; + uint64_t handle; + STaosHeader *pHeader = NULL, head; + SUdpConnSet *pSet = pTransfer->pSet; + + SHandleViaTcp handleViaTcp; + retLen = taosReadMsg(connFd, &handleViaTcp, sizeof(SHandleViaTcp)); + + if (retLen != sizeof(SHandleViaTcp)) { + tError("%s UDP server failed to read handle, retLen:%d", pSet->label, retLen); + taosCloseSocket(connFd); + free(pTransfer); + return NULL; + } + + if (!taosCheckHandleViaTcpValid(&handleViaTcp)) { + tError("%s UDP server read handle via tcp invalid, handle:%ld, hash:%ld", pSet->label, handleViaTcp.handle, + handleViaTcp.hash); + taosCloseSocket(connFd); + free(pTransfer); + return NULL; + } + + handle = handleViaTcp.handle; + + if (handle == 0) { + // receive a packet from client + tTrace("%s data will be received via TCP from 0x%x:%d", pSet->label, pTransfer->ip, pTransfer->port); + retLen = taosReadMsg(connFd, &head, sizeof(STaosHeader)); + if (retLen != (int)sizeof(STaosHeader)) { + tError("%s failed to read msg header, retLen:%d", pSet->label, retLen); + } else { + SMonitor *pMonitor = (SMonitor *)calloc(1, sizeof(SMonitor)); + pMonitor->dataLen = sizeof(STaosHeader); + memcpy(pMonitor->data, &head, (size_t)pMonitor->dataLen); + ((STaosHeader *)pMonitor->data)->msgLen = (int32_t)htonl(sizeof(STaosHeader)); + ((STaosHeader *)pMonitor->data)->tcp = 1; + pMonitor->ip = pTransfer->ip; + pMonitor->port = head.port; + pMonitor->pSet = pSet; + taosTmrReset(taosProcessMonitorTimer, 0, pMonitor, pSet->tmrCtrl, &pMonitor->pTimer); + + msgLen = (int32_t)htonl((uint32_t)head.msgLen); + char *buffer = malloc((size_t)msgLen); + leftLen = msgLen - (int)sizeof(STaosHeader); + retLen = taosReadMsg(connFd, buffer + sizeof(STaosHeader), leftLen); + pMonitor->pSet = NULL; + + if (retLen != leftLen) { + tError("%s failed to read data from client, leftLen:%d retLen:%d, error:%s", + pSet->label, leftLen, retLen, strerror(errno)); + } else { + tTrace("%s data is received from client via TCP from 0x%x:%d, msgLen:%d", pSet->label, pTransfer->ip, + pTransfer->port, msgLen); + pSet->index = (pSet->index + 1) % pSet->threads; + SUdpConn *pConn = pSet->udpConn + pSet->index; + memcpy(buffer, &head, sizeof(STaosHeader)); + (*pSet->fp)(buffer, msgLen, pTransfer->ip, head.port, pSet->shandle, NULL, pConn); + } + + taosWriteMsg(connFd, &handleViaTcp, 
sizeof(SHandleViaTcp)); + } + } else { + // send a packet to client + tTrace("%s send packet to client via TCP, handle:0x%x", pSet->label, handle); + pHeader = (STaosHeader *)handle; + msgLen = (int32_t)htonl((uint32_t)pHeader->msgLen); + + if (pHeader->tcp != 0 || msgLen < 1024) { + tError("%s invalid handle:%p, connection shall be closed", pSet->label, pHeader); + } else { + SMonitor *pMonitor = (SMonitor *)calloc(1, sizeof(SMonitor)); + pMonitor->dataLen = sizeof(STaosHeader); + memcpy(pMonitor->data, (void *)handle, (size_t)pMonitor->dataLen); + STaosHeader *pThead = (STaosHeader *)pMonitor->data; + pThead->tcp = 1; + pThead->msgType = (char)(pHeader->msgType - 1); + pThead->msgLen = (int32_t)htonl(sizeof(STaosHeader)); + pMonitor->ip = pTransfer->ip; + pMonitor->port = pTransfer->port; + pMonitor->pSet = pSet; + taosTmrReset(taosProcessMonitorTimer, 200, pMonitor, pSet->tmrCtrl, &pMonitor->pTimer); + + retLen = taosWriteMsg(connFd, (void *)handle, msgLen); + pMonitor->pSet = NULL; + + if (retLen != msgLen) { + tError("%s failed to send data to client, msgLen:%d retLen:%d", pSet->label, msgLen, retLen); + } else { + tTrace("%s data is sent to client successfully via TCP to 0x%x:%d, size:%d", + pSet->label, pTransfer->ip, pTransfer->port, msgLen); + } + } + } + + // retLen = taosReadMsg(connFd, &handleViaTcp, sizeof(handleViaTcp)); + free(pTransfer); + taosCloseSocket(connFd); + + return NULL; +} + +void *taosUdpTcpConnection(void *argv) { + int connFd = -1; + struct sockaddr_in clientAddr; + pthread_attr_t thattr; + pthread_t thread; + uint32_t sourceIp; + char ipstr[20]; + + SUdpConnSet *pSet = (SUdpConnSet *)argv; + + pSet->tcpFd = taosOpenTcpServerSocket(pSet->ip, pSet->port); + if (pSet->tcpFd < 0) { + tPrint("%s failed to create TCP socket %s:%d for UDP server, reason:%s", pSet->label, pSet->ip, pSet->port, + strerror(errno)); + taosKillSystem(); + return NULL; + } + + tTrace("%s UDP server is created, ip:%s:%d", pSet->label, pSet->ip, pSet->port); + + while (1) { + if (pSet->tcpFd < 0) break; + socklen_t addrlen = sizeof(clientAddr); + connFd = accept(pSet->tcpFd, (struct sockaddr *)&clientAddr, &addrlen); + + if (connFd < 0) { + tError("%s UDP server TCP accept failure, reason:%s", pSet->label, strerror(errno)); + continue; + } + + sourceIp = clientAddr.sin_addr.s_addr; + tinet_ntoa(ipstr, sourceIp); + tTrace("%s UDP server TCP connection from ip:%s:%u", pSet->label, ipstr, htons(clientAddr.sin_port)); + + STransfer *pTransfer = malloc(sizeof(STransfer)); + pTransfer->fd = connFd; + pTransfer->ip = sourceIp; + pTransfer->port = clientAddr.sin_port; + pTransfer->pSet = pSet; + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); + if (pthread_create(&(thread), &thattr, taosTransferDataViaTcp, (void *)pTransfer) < 0) { + tTrace("%s faile to create thread for UDP server, reason:%s", pSet->label, strerror(errno)); + taosCloseSocket(connFd); + } + } + + return NULL; +} + +void *taosInitUdpConnection(char *ip, short port, char *label, int threads, void *fp, void *shandle) { + pthread_attr_t thAttr; + SUdpConn * pConn; + SUdpConnSet * pSet; + + int size = (int)sizeof(SUdpConnSet) + threads * (int)sizeof(SUdpConn); + pSet = (SUdpConnSet *)malloc((size_t)size); + if (pSet == NULL) { + tError("%s failed to allocate UdpConn", label); + return NULL; + } + + memset(pSet, 0, (size_t)size); + strcpy(pSet->ip, ip); + pSet->port = port; + pSet->threads = threads; + pSet->shandle = shandle; + pSet->fp = fp; + pSet->tcpFd = -1; + strcpy(pSet->label, 
label); + + // if ( tsUdpDelay ) { + char udplabel[12]; + sprintf(udplabel, "%s.b", label); + pSet->tmrCtrl = taosTmrInit(RPC_MAX_UDP_CONNS * threads, 5, 5000, udplabel); + // } + + short ownPort; + for (int i = 0; i < threads; ++i) { + pConn = pSet->udpConn + i; + ownPort = (int16_t)(port ? port + i : 0); + pConn->fd = taosOpenUdpSocket(ip, ownPort); + if (pConn->fd < 0) { + tError("%s failed to open UDP socket %s:%d", label, ip, port); + return NULL; + } + + struct sockaddr_in sin; + unsigned int addrlen = sizeof(sin); + if (getsockname(pConn->fd, (struct sockaddr *)&sin, &addrlen) == 0 && sin.sin_family == AF_INET && + addrlen == sizeof(sin)) { + pConn->localPort = (int16_t)ntohs(sin.sin_port); + } + + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(&pConn->thread, &thAttr, taosRecvUdpData, pConn) != 0) { + close(pConn->fd); + tError("%s failed to create thread to process UDP data, reason:%s", label, strerror(errno)); + return NULL; + } + + strcpy(pConn->label, label); + pConn->shandle = shandle; + pConn->processData = fp; + pConn->index = i; + pConn->pSet = pSet; + pConn->signature = pConn; + if (tsUdpDelay) { + pConn->hash = taosOpenIpHash(RPC_MAX_UDP_CONNS); + pthread_mutex_init(&pConn->mutex, NULL); + pConn->tmrCtrl = pSet->tmrCtrl; + } + } + + pthread_attr_destroy(&thAttr); + tTrace("%s UDP connection is initialized, ip:%s port:%u threads:%d", label, ip, port, threads); + + return pSet; +} + +void *taosInitUdpServer(char *ip, short port, char *label, int threads, void *fp, void *shandle) { + SUdpConnSet *pSet; + pSet = taosInitUdpConnection(ip, port, label, threads, fp, shandle); + if (pSet == NULL) return NULL; + + pSet->server = 1; + pSet->fp = fp; + + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); + + // not support by windows + // pthread_t thread; + // pSet->tcpThread = pthread_create(&(thread), &thattr, taosUdpTcpConnection, + // pSet); + pthread_create(&(pSet->tcpThread), &thattr, taosUdpTcpConnection, pSet); + + return pSet; +} + +void *taosInitUdpClient(char *ip, short port, char *label, int threads, void *fp, void *shandle) { + return taosInitUdpConnection(ip, port, label, threads, fp, shandle); +} + +void taosCleanUpUdpConnection(void *handle) { + SUdpConnSet *pSet = (SUdpConnSet *)handle; + SUdpConn * pConn; + + if (pSet == NULL) return; + if (pSet->server == 1) { + pthread_cancel(pSet->tcpThread); + } + + for (int i = 0; i < pSet->threads; ++i) { + pConn = pSet->udpConn + i; + pConn->signature = NULL; + taosCloseSocket(pConn->fd); + if (pConn->hash) { + taosCloseIpHash(pConn->hash); + pthread_mutex_destroy(&pConn->mutex); + } + + pthread_cancel(pConn->thread); + pthread_join(pConn->thread, NULL); + tTrace("chandle:%p is closed", pConn); + } + + if (pSet->tcpFd >= 0) taosCloseTcpSocket(pSet->tcpFd); + pSet->tcpFd = -1; + taosTmrCleanUp(pSet->tmrCtrl); + tfree(pSet); +} + +void *taosOpenUdpConnection(void *shandle, void *thandle, char *ip, short port) { + SUdpConnSet *pSet = (SUdpConnSet *)shandle; + + pSet->index = (pSet->index + 1) % pSet->threads; + + SUdpConn *pConn = pSet->udpConn + pSet->index; + pConn->port = port; + + tTrace("%s UDP connection is setup, ip: %s:%d, local: %s:%d", pConn->label, ip, port, pSet->ip, + ntohs((uint16_t)pConn->localPort)); + + return pConn; +} + +void taosRemoveUdpBuf(SUdpBuf *pBuf) { + taosTmrStopA(&pBuf->timer); + taosDeleteIpHash(pBuf->pConn->hash, pBuf->ip, pBuf->port); + + // tTrace("%s UDP buffer 
to:0x%lld:%d is removed", pBuf->pConn->label, + // pBuf->ip, pBuf->port); + + pBuf->signature = NULL; + taosFreeMsgHdr(pBuf->msgHdr); + free(pBuf); +} + +void taosProcessUdpBufTimer(void *param, void *tmrId) { + SUdpBuf *pBuf = (SUdpBuf *)param; + if (pBuf->signature != param) return; + if (pBuf->timer != tmrId) return; + + SUdpConn *pConn = pBuf->pConn; + + pthread_mutex_lock(&pConn->mutex); + + if (taosMsgHdrSize(pBuf->msgHdr) > 0) { + taosSendMsgHdr(pBuf->msgHdr, pConn->fd); + pBuf->totalLen = 0; + pBuf->emptyNum = 0; + } else { + pBuf->emptyNum++; + if (pBuf->emptyNum > 200) { + taosRemoveUdpBuf(pBuf); + pBuf = NULL; + } + } + + pthread_mutex_unlock(&pConn->mutex); + + if (pBuf) taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); +} + +SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, short port) { + SUdpBuf *pBuf = (SUdpBuf *)malloc(sizeof(SUdpBuf)); + memset(pBuf, 0, sizeof(SUdpBuf)); + + pBuf->ip = ip; + pBuf->port = port; + pBuf->pConn = pConn; + + pBuf->destAdd.sin_family = AF_INET; + pBuf->destAdd.sin_addr.s_addr = ip; + pBuf->destAdd.sin_port = (uint16_t)htons((uint16_t)port); + taosInitMsgHdr(&(pBuf->msgHdr), &(pBuf->destAdd), RPC_MAX_UDP_PKTS); + pBuf->signature = pBuf; + taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); + + // tTrace("%s UDP buffer to:0x%lld:%d is created", pBuf->pConn->label, + // pBuf->ip, pBuf->port); + + return pBuf; +} + +int taosSendPacketViaTcp(uint32_t ip, short port, char *data, int dataLen, void *chandle) { + SUdpConn * pConn = (SUdpConn *)chandle; + SUdpConnSet *pSet = (SUdpConnSet *)pConn->pSet; + int code = -1, retLen, msgLen; + char ipstr[64]; + char buffer[128]; + STaosHeader *pHead; + + if (pSet->server) { + // send from server + + pHead = (STaosHeader *)buffer; + memcpy(pHead, data, sizeof(STaosHeader)); + pHead->tcp = 1; + + SPacketInfo *pInfo = (SPacketInfo *)pHead->content; + pInfo->handle = (uint64_t)data; + pInfo->port = (uint16_t)pSet->port; + pInfo->msgLen = pHead->msgLen; + + msgLen = sizeof(STaosHeader) + sizeof(SPacketInfo); + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + code = taosSendUdpData(ip, port, buffer, msgLen, chandle); + tTrace("%s data from server will be sent via TCP:%d, msgType:%d, length:%d, handle:0x%x", + pSet->label, pInfo->port, pHead->msgType, htonl((uint32_t)pInfo->msgLen), pInfo->handle); + if (code > 0) code = dataLen; + } else { + // send from client + tTrace("%s data will be sent via TCP from client", pSet->label); + + // send a UDP header first to set up the connection + pHead = (STaosHeader *)buffer; + memcpy(pHead, data, sizeof(STaosHeader)); + pHead->tcp = 2; + msgLen = sizeof(STaosHeader); + pHead->msgLen = (int32_t)htonl(msgLen); + code = taosSendUdpData(ip, port, buffer, msgLen, chandle); + + pHead = (STaosHeader *)data; + + tinet_ntoa(ipstr, ip); + int fd = taosOpenTcpClientSocket(ipstr, pConn->port, tsLocalIp); + if (fd < 0) { + tError("%s failed to open TCP socket to:%s:%u to send packet", pSet->label, ipstr, pConn->port); + } else { + SHandleViaTcp handleViaTcp; + taosInitHandleViaTcp(&handleViaTcp, 0); + retLen = (int)taosWriteSocket(fd, (char *)&handleViaTcp, sizeof(SHandleViaTcp)); + + if (retLen != (int)sizeof(handleViaTcp)) { + tError("%s failed to send handle to server, retLen:%d", pSet->label, retLen); + } else { + retLen = taosWriteMsg(fd, data, dataLen); + if (retLen != dataLen) { + tError("%s failed to send data via TCP, dataLen:%d, retLen:%d, error:%s", pSet->label, dataLen, retLen, + 
strerror(errno)); + } else { + code = dataLen; + tTrace("%s data is sent via TCP successfully", pSet->label); + } + } + + taosReadMsg(fd, (char *)&handleViaTcp, sizeof(SHandleViaTcp)); + + taosCloseTcpSocket(fd); + } + } + + return code; +} + +int taosSendUdpData(uint32_t ip, short port, char *data, int dataLen, void *chandle) { + SUdpConn *pConn = (SUdpConn *)chandle; + SUdpBuf * pBuf; + + if (pConn == NULL || pConn->signature != pConn) return -1; + + if (dataLen >= RPC_MAX_UDP_SIZE) return taosSendPacketViaTcp(ip, port, data, dataLen, chandle); + + if (pConn->hash == NULL) { + struct sockaddr_in destAdd; + memset(&destAdd, 0, sizeof(destAdd)); + destAdd.sin_family = AF_INET; + destAdd.sin_addr.s_addr = ip; + destAdd.sin_port = htons((uint16_t)port); + + int ret = (int)sendto(pConn->fd, data, (size_t)dataLen, 0, (struct sockaddr *)&destAdd, sizeof(destAdd)); + tTrace("%s msg is sent to 0x%x:%hu len:%d ret:%d localPort:%hu chandle:0x%x", pConn->label, destAdd.sin_addr.s_addr, + port, dataLen, ret, pConn->localPort, chandle); + + return ret; + } + + pthread_mutex_lock(&pConn->mutex); + + pBuf = (SUdpBuf *)taosGetIpHash(pConn->hash, ip, port); + if (pBuf == NULL) { + pBuf = taosCreateUdpBuf(pConn, ip, port); + taosAddIpHash(pConn->hash, pBuf, ip, port); + } + + if ((pBuf->totalLen + dataLen > RPC_MAX_UDP_SIZE) || (taosMsgHdrSize(pBuf->msgHdr) >= RPC_MAX_UDP_PKTS)) { + taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer); + + taosSendMsgHdr(pBuf->msgHdr, pConn->fd); + pBuf->totalLen = 0; + } + + taosSetMsgHdrData(pBuf->msgHdr, data, dataLen); + + pBuf->totalLen += dataLen; + + pthread_mutex_unlock(&pConn->mutex); + + return dataLen; +} diff --git a/src/sdb/CMakeLists.txt b/src/sdb/CMakeLists.txt new file mode 100755 index 000000000000..26c75bb18e43 --- /dev/null +++ b/src/sdb/CMakeLists.txt @@ -0,0 +1,8 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc) + +ADD_LIBRARY(sdb ${SRC}) diff --git a/src/sdb/inc/hashint.h b/src/sdb/inc/hashint.h new file mode 100644 index 000000000000..052689f337a0 --- /dev/null +++ b/src/sdb/inc/hashint.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _sdb_int_hash_header_ +#define _sdb_int_hash_header_ + +void *sdbOpenIntHash(int maxSessions, int dataSize); +void sdbCloseIntHash(void *handle); +void *sdbAddIntHash(void *handle, void *key, void *pData); +void sdbDeleteIntHash(void *handle, void *key); +void *sdbGetIntHashData(void *handle, void *key); +void *sdbFetchIntHashData(void *handle, void *ptr, void **ppMeta); + +#endif diff --git a/src/sdb/inc/hashstr.h b/src/sdb/inc/hashstr.h new file mode 100644 index 000000000000..66f6e88857de --- /dev/null +++ b/src/sdb/inc/hashstr.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _sdb_str_hash_header_ +#define _sdb_str_hash_header_ + +void *sdbOpenStrHash(int maxSessions, int dataSize); +void sdbCloseStrHash(void *handle); +void *sdbAddStrHash(void *handle, void *key, void *pData); +void sdbDeleteStrHash(void *handle, void *key); +void *sdbGetStrHashData(void *handle, void *key); +void *sdbFetchStrHashData(void *handle, void *ptr, void **ppMeta); + +#endif diff --git a/src/sdb/inc/sdbint.h b/src/sdb/inc/sdbint.h new file mode 100644 index 000000000000..6e020bde9c79 --- /dev/null +++ b/src/sdb/inc/sdbint.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _sdbint_header_ +#define _sdbint_header_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hashint.h" +#include "hashstr.h" +#include "sdb.h" +#include "tchecksum.h" +#include "tlog.h" +#include "trpc.h" +#include "tutil.h" + +#define sdbError(...) \ + if (sdbDebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR MND-SDB ", 255, __VA_ARGS__); \ + } +#define sdbWarn(...) \ + if (sdbDebugFlag & DEBUG_WARN) { \ + tprintf("WARN MND-SDB ", sdbDebugFlag, __VA_ARGS__); \ + } +#define sdbTrace(...) \ + if (sdbDebugFlag & DEBUG_TRACE) { \ + tprintf("MND-SDB ", sdbDebugFlag, __VA_ARGS__); \ + } +#define sdbPrint(...) \ + { tprintf("MND-SDB ", 255, __VA_ARGS__); } + +#define sdbLError(...) taosLogError(__VA_ARGS__) sdbError(__VA_ARGS__) +#define sdbLWarn(...) taosLogWarn(__VA_ARGS__) sdbWarn(__VA_ARGS__) +#define sdbLPrint(...) 
taosLogPrint(__VA_ARGS__) sdbPrint(__VA_ARGS__) + +#define SDB_MAX_PEERS 4 +#define SDB_DELIMITER 0xFFF00F00 +#define SDB_ENDCOMMIT 0xAFFFAAAF + +typedef struct { + uint64_t swVersion; + int16_t sdbFileVersion; + char reserved[6]; + TSCKSUM checkSum; +} SSdbHeader; + +typedef struct { + char type; + // short rowSize; + char *row; +} SSdbUpdate; + +typedef struct { + char numOfTables; + uint64_t version[]; +} SSdbSync; + +typedef struct { + SSdbHeader header; + int maxRows; + int dbId; + int32_t maxRowSize; + char name[TSDB_DB_NAME_LEN]; + char fn[128]; + int keyType; + uint32_t autoIndex; + int64_t numOfRows; + int64_t id; + int64_t size; + void * iHandle; + int fd; + void *(*appTool)(char, void *, char *, int, int *); + pthread_mutex_t mutex; + SSdbUpdate * update; + int numOfUpdates; + int updatePos; +} SSdbTable; + +typedef struct { + int64_t id; + int64_t offset; + int rowSize; + void * row; +} SRowMeta; + +typedef struct { + int32_t delimiter; + int32_t rowSize; + int64_t id; + char data[]; +} SRowHead; + +typedef struct { + char * buffer; + char * offset; + int trans; + int bufferSize; + pthread_mutex_t qmutex; +} STranQueue; + +typedef struct { + char status; + char role; + char numOfMnodes; + uint64_t dbVersion; + uint32_t numOfDnodes; + uint32_t publicIp; +} SMnodeStatus; + +typedef struct { + char dbId; + char type; + uint64_t version; + short dataLen; + char data[]; +} SForwardMsg; + +extern SSdbTable *tableList[]; +extern int sdbMaxPeers; +extern int sdbDebugFlag; +extern int sdbNumOfTables; +extern int64_t sdbVersion; + +int sdbForwardDbReqToPeer(SSdbTable *pTable, char type, char *data, int dataLen); +int sdbRetrieveRows(int fd, SSdbTable *pTable, uint64_t version); +void sdbResetTable(SSdbTable *pTable); +extern const int16_t sdbFileVersion; + +#endif diff --git a/src/sdb/src/hashint.c b/src/sdb/src/hashint.c new file mode 100644 index 000000000000..c99a5aff737f --- /dev/null +++ b/src/sdb/src/hashint.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tmempool.h" +#include "tsdb.h" + +typedef struct _long_hash_t { + uint32_t key; + int hash; + struct _long_hash_t *prev; + struct _long_hash_t *next; + char data[]; +} SLongHash; + +typedef struct { + SLongHash **longHashList; + mpool_h longHashMemPool; + int maxSessions; + int dataSize; +} SHashObj; + +int sdbHashLong(void *handle, uint32_t ip) { + SHashObj *pObj = (SHashObj *)handle; + int hash = 0; + + hash = ip >> 16; + hash += (ip & 0xFFFF); + + hash = hash % pObj->maxSessions; + + return hash; +} + +void *sdbAddIntHash(void *handle, void *pKey, void *data) { + int hash; + SLongHash *pNode; + SHashObj * pObj; + uint32_t key = *((uint32_t *)pKey); + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + hash = sdbHashLong(pObj, key); + pNode = (SLongHash *)taosMemPoolMalloc(pObj->longHashMemPool); + pNode->key = key; + memcpy(pNode->data, data, pObj->dataSize); + pNode->prev = 0; + pNode->next = pObj->longHashList[hash]; + pNode->hash = hash; + + if (pObj->longHashList[hash] != 0) (pObj->longHashList[hash])->prev = pNode; + pObj->longHashList[hash] = pNode; + + return pObj; +} + +void sdbDeleteIntHash(void *handle, void *pKey) { + int hash; + SLongHash *pNode; + SHashObj * pObj; + uint32_t key = *((uint32_t *)pKey); + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + hash = sdbHashLong(pObj, key); + + pNode = pObj->longHashList[hash]; + while (pNode) { + if (pNode->key == key) break; + + pNode = pNode->next; + } + + if (pNode) { + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pObj->longHashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + taosMemPoolFree(pObj->longHashMemPool, (char *)pNode); + } +} + +void *sdbGetIntHashData(void *handle, void *pKey) { + int hash; + SLongHash *pNode; + SHashObj * pObj; + uint32_t key = *((uint32_t *)pKey); + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + hash = sdbHashLong(pObj, key); + pNode = pObj->longHashList[hash]; + + while (pNode) { + if (pNode->key == key) { + break; + } + pNode = pNode->next; + } + + if (pNode) return pNode->data; + + return NULL; +} + +void *sdbOpenIntHash(int maxSessions, int dataSize) { + SLongHash **longHashList; + mpool_h longHashMemPool; + SHashObj * pObj; + + longHashMemPool = taosMemPoolInit(maxSessions, sizeof(SLongHash) + dataSize); + if (longHashMemPool == 0) return NULL; + + longHashList = calloc(sizeof(SLongHash *), maxSessions); + if (longHashList == 0) { + taosMemPoolCleanUp(longHashMemPool); + return NULL; + } + + pObj = malloc(sizeof(SHashObj)); + if (pObj == NULL) { + taosMemPoolCleanUp(longHashMemPool); + free(longHashList); + return NULL; + } + + pObj->maxSessions = maxSessions; + pObj->longHashMemPool = longHashMemPool; + pObj->longHashList = longHashList; + pObj->dataSize = dataSize; + + return pObj; +} + +void sdbCloseIntHash(void *handle) { + SHashObj *pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + if (pObj->longHashMemPool) taosMemPoolCleanUp(pObj->longHashMemPool); + + if (pObj->longHashList) free(pObj->longHashList); + + memset(pObj, 0, sizeof(SHashObj)); + free(pObj); +} + +void *sdbFetchIntHashData(void *handle, void *ptr, void **ppMeta) { + SHashObj * pObj = (SHashObj *)handle; + SLongHash *pNode = (SLongHash *)ptr; + int hash = 0; + + *ppMeta = NULL; + if (pObj == NULL || 
pObj->maxSessions <= 0) return NULL; + if (pObj->longHashList == NULL) return NULL; + + if (pNode) { + hash = pNode->hash + 1; + pNode = pNode->next; + } + + if (pNode == NULL) { + for (int i = hash; i < pObj->maxSessions; ++i) { + pNode = pObj->longHashList[i]; + if (pNode) break; + } + } + + if (pNode) *ppMeta = pNode->data; + + return pNode; +} diff --git a/src/sdb/src/hashstr.c b/src/sdb/src/hashstr.c new file mode 100644 index 000000000000..1a9a7fefb4f1 --- /dev/null +++ b/src/sdb/src/hashstr.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include "tsdb.h" + +#define MAX_STR_LEN 40 + +typedef struct _str_node_t { + char string[TSDB_METER_ID_LEN]; + int hash; + struct _str_node_t *prev; + struct _str_node_t *next; + char data[]; +} SHashNode; + +typedef struct { + SHashNode **hashList; + int maxSessions; + int dataSize; +} SHashObj; + +int sdbHashString(void *handle, char *string) { + SHashObj * pObj = (SHashObj *)handle; + unsigned int hash = 0, hashv; + char * c; + int len = strlen(string); + + c = string; + + while (len >= 4) { + hash += *((int *)c); + c += 4; + len -= 4; + } + + while (len > 0) { + hash += *c; + c++; + len--; + } + + hashv = hash / pObj->maxSessions; + hash = (hashv + hash % pObj->maxSessions) % pObj->maxSessions; + + return hash; +} + +void *sdbAddStrHash(void *handle, void *key, void *pData) { + int hash; + SHashNode *pNode; + SHashObj * pObj; + char * string = (char *)key; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + hash = sdbHashString(pObj, string); + + int size = sizeof(SHashNode) + pObj->dataSize; + pNode = (SHashNode *)malloc(size); + memset(pNode, 0, size); + strcpy(pNode->string, string); + memcpy(pNode->data, pData, pObj->dataSize); + pNode->prev = 0; + pNode->next = pObj->hashList[hash]; + pNode->hash = hash; + + if (pObj->hashList[hash] != 0) (pObj->hashList[hash])->prev = pNode; + pObj->hashList[hash] = pNode; + + return pNode->data; +} + +void sdbDeleteStrHash(void *handle, void *key) { + int hash; + SHashNode *pNode; + SHashObj * pObj; + char * string = (char *)key; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + hash = sdbHashString(pObj, string); + pNode = pObj->hashList[hash]; + while (pNode) { + if (strcmp(pNode->string, string) == 0) break; + + pNode = pNode->next; + } + + if (pNode) { + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pObj->hashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + memset(pNode, 0, sizeof(SHashNode)); + free(pNode); + } +} + +void *sdbGetStrHashData(void *handle, void *key) { + int hash; + SHashNode *pNode; + SHashObj * pObj; + char * string = (char *)key; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + hash = sdbHashString(pObj, string); + pNode = pObj->hashList[hash]; + + while (pNode) { + 
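+    // Walk the collision chain of this bucket; different keys can hash to the
+    // same slot, so the full key string is compared before a match is returned.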
if (strcmp(pNode->string, string) == 0) { + break; + } + pNode = pNode->next; + } + + if (pNode) return pNode->data; + + return NULL; +} + +void *sdbOpenStrHash(int maxSessions, int dataSize) { + SHashObj *pObj; + + pObj = (SHashObj *)malloc(sizeof(SHashObj)); + if (pObj == NULL) { + return NULL; + } + + memset(pObj, 0, sizeof(SHashObj)); + pObj->maxSessions = maxSessions; + pObj->dataSize = dataSize; + + pObj->hashList = (SHashNode **)malloc(sizeof(SHashNode *) * maxSessions); + if (pObj->hashList == NULL) { + free(pObj); + return NULL; + } + memset(pObj->hashList, 0, sizeof(SHashNode *) * maxSessions); + + return (void *)pObj; +} + +void sdbCloseStrHash(void *handle) { + SHashObj *pObj; + SHashNode *pNode, *pNext; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions <= 0) return; + + if (pObj->hashList) { + for (int i = 0; i < pObj->maxSessions; ++i) { + pNode = pObj->hashList[i]; + while (pNode) { + pNext = pNode->next; + free(pNode); + pNode = pNext; + } + } + + free(pObj->hashList); + } + + memset(pObj, 0, sizeof(SHashObj)); + free(pObj); +} + +void *sdbFetchStrHashData(void *handle, void *ptr, void **ppMeta) { + SHashObj *pObj = (SHashObj *)handle; + SHashNode *pNode = (SHashNode *)ptr; + int hash = 0; + + *ppMeta = NULL; + if (pObj == NULL || pObj->maxSessions <= 0) return NULL; + if (pObj->hashList == NULL) return NULL; + + if (pNode) { + hash = pNode->hash + 1; + pNode = pNode->next; + } + + if (pNode == NULL) { + for (int i = hash; i < pObj->maxSessions; ++i) { + pNode = pObj->hashList[i]; + if (pNode) break; + } + } + + if (pNode) *ppMeta = pNode->data; + + return pNode; +} diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c new file mode 100644 index 000000000000..f92fc663c1f7 --- /dev/null +++ b/src/sdb/src/sdbEngine.c @@ -0,0 +1,917 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sdb.h" +#include "sdbint.h" +#include "tutil.h" + +#define abs(x) (((x) < 0) ? 
-(x) : (x)) + +extern char version[]; +const int16_t sdbFileVersion = 0; + +void *(*sdbInitIndexFp[])(int maxRows, int dataSize) = {sdbOpenStrHash, sdbOpenIntHash, sdbOpenIntHash}; + +void *(*sdbAddIndexFp[])(void *handle, void *key, void *data) = {sdbAddStrHash, sdbAddIntHash, sdbAddIntHash}; + +void (*sdbDeleteIndexFp[])(void *handle, void *key) = {sdbDeleteStrHash, sdbDeleteIntHash, sdbDeleteIntHash}; + +void *(*sdbGetIndexFp[])(void *handle, void *key) = {sdbGetStrHashData, sdbGetIntHashData, sdbGetIntHashData}; + +void (*sdbCleanUpIndexFp[])(void *handle) = { + sdbCloseStrHash, sdbCloseIntHash, sdbCloseIntHash, +}; + +void *(*sdbFetchRowFp[])(void *handle, void *ptr, void **ppRow) = { + sdbFetchStrHashData, sdbFetchIntHashData, sdbFetchIntHashData, +}; + +SSdbTable *tableList[20]; +int sdbNumOfTables; +int64_t sdbVersion; + +void sdbFinishCommit(void *handle) { + SSdbTable *pTable = (SSdbTable *)handle; + uint32_t sdbEcommit = SDB_ENDCOMMIT; + + off_t offset = lseek(pTable->fd, 0, SEEK_END); + assert(offset == pTable->size); + write(pTable->fd, &sdbEcommit, sizeof(sdbEcommit)); + pTable->size += sizeof(sdbEcommit); +} + +int sdbOpenSdbFile(SSdbTable *pTable) { + struct stat fstat, ofstat; + uint64_t size; + char * dirc = NULL; + char * basec = NULL; + union { + char cversion[64]; + uint64_t iversion; + } swVersion; + + memcpy(swVersion.cversion, version, sizeof(uint64_t)); + + // check sdb.db and .sdb.db status + char fn[128] = "\0"; + dirc = strdup(pTable->fn); + basec = strdup(pTable->fn); + sprintf(fn, "%s/.%s", dirname(dirc), basename(basec)); + tfree(dirc); + tfree(basec); + if (stat(fn, &ofstat) == 0) { // .sdb.db file exists + if (stat(pTable->fn, &fstat) == 0) { + remove(fn); + } else { + remove(pTable->fn); + rename(fn, pTable->fn); + } + } + + pTable->fd = open(pTable->fn, O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + if (pTable->fd < 0) { + sdbError("failed to open file:%s", pTable->fn); + return -1; + } + + pTable->size = 0; + stat(pTable->fn, &fstat); + size = sizeof(pTable->header); + + if (fstat.st_size == 0) { + pTable->header.swVersion = swVersion.iversion; + pTable->header.sdbFileVersion = sdbFileVersion; + if (taosCalcChecksumAppend(0, (uint8_t *)(&pTable->header), size) < 0) { + sdbError("failed to get file header checksum, file: %s", pTable->fn); + tclose(pTable->fd); + return -1; + } + write(pTable->fd, &(pTable->header), size); + pTable->size += size; + sdbFinishCommit(pTable); + } else { + uint32_t sdbEcommit = 0; + off_t offset = lseek(pTable->fd, -(sizeof(sdbEcommit)), SEEK_END); + while (offset > 0) { + read(pTable->fd, &sdbEcommit, sizeof(sdbEcommit)); + if (sdbEcommit == SDB_ENDCOMMIT) { + ftruncate(pTable->fd, offset + sizeof(sdbEcommit)); + break; + } + offset = lseek(pTable->fd, -(sizeof(sdbEcommit) + 1), SEEK_CUR); + } + lseek(pTable->fd, 0, SEEK_SET); + + ssize_t tsize = read(pTable->fd, &(pTable->header), size); + if (tsize < size) { + sdbError("failed to read sdb file header, file: %s", pTable->fn); + tclose(pTable->fd); + return -1; + } + + if (pTable->header.swVersion != swVersion.iversion) { + sdbWarn("sdb file %s version not match software version", pTable->fn); + } + + if (!taosCheckChecksumWhole((uint8_t *)(&pTable->header), size)) { + sdbError("sdb file header is broken since checksum mismatch, file: %s", pTable->fn); + tclose(pTable->fd); + return -1; + } + + pTable->size += size; + // skip end commit symbol + lseek(pTable->fd, sizeof(sdbEcommit), SEEK_CUR); + pTable->size += sizeof(sdbEcommit); + } + + pTable->numOfRows = 0; + + return 
pTable->fd; +} + +// TODO: Change here +void sdbAddIntoUpdateList(SSdbTable *pTable, char type, char *row) { + pTable->numOfUpdates++; + pTable->updatePos = pTable->numOfUpdates % pTable->maxRows; + + if (pTable->update[pTable->updatePos].type == SDB_TYPE_DELETE) + (*(pTable->appTool))(SDB_TYPE_DESTROY, pTable->update[pTable->updatePos].row, NULL, 0, NULL); + + pTable->update[pTable->updatePos].type = type; + pTable->update[pTable->updatePos].row = row; +} + +int sdbInitTableByFile(SSdbTable *pTable) { + SRowMeta rowMeta; + int numOfDels = 0; + int bytes = 0; + int64_t oldId = 0; + void * pMetaRow = NULL; + int total_size = 0; + int real_size = 0; + + oldId = pTable->id; + if (sdbOpenSdbFile(pTable) < 0) return -1; + + total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); + SRowHead *rowHead = (SRowHead *)malloc(total_size); + if (rowHead == NULL) { + sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + return -1; + } + + // Loop to read sdb file row by row + while (1) { + memset(rowHead, 0, total_size); + + bytes = read(pTable->fd, rowHead, sizeof(SRowHead)); + if (bytes < 0) { + sdbError("failed to read sdb file: %s", pTable->fn); + goto sdb_exit1; + } + + if (bytes == 0) break; + + if (bytes < sizeof(SRowHead) || rowHead->delimiter != SDB_DELIMITER) { + pTable->size++; + lseek(pTable->fd, -(bytes - 1), SEEK_CUR); + continue; + } + + if (rowHead->rowSize < 0 || rowHead->rowSize > pTable->maxRowSize) { + sdbError("error row size in sdb file: %s rowSize: %d maxRowSize: %d", pTable->fn, rowHead->rowSize, + pTable->maxRowSize); + pTable->size += sizeof(SRowHead); + continue; + } + + // sdbTrace("%s id:%ld rowSize:%d", pTable->name, rowHead->id, + // rowHead->rowSize); + + bytes = read(pTable->fd, rowHead->data, rowHead->rowSize + sizeof(TSCKSUM)); + if (bytes < rowHead->rowSize + sizeof(TSCKSUM)) { + // TODO: Here may cause pTable->size not end of the file + sdbError("failed to read sdb file: %s id: %d rowSize: %d", pTable->fn, rowHead->id, rowHead->rowSize); + break; + } + + real_size = sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM); + if (!taosCheckChecksumWhole((uint8_t *)rowHead, real_size)) { + sdbError("error sdb checksum, sdb: %s id: %d, skip", pTable->name, rowHead->id); + pTable->size += real_size; + continue; + } + + // Check if the the object exists already + + pMetaRow = sdbGetRow(pTable, rowHead->data); + if (pMetaRow == NULL) { // New object + if (rowHead->id < 0) { + /* assert(0); */ + sdbError("error sdb negative id: %d, sdb: %s, skip", rowHead->id, pTable->name); + } else { + rowMeta.id = rowHead->id; + // TODO: Get rid of the rowMeta.offset and rowSize + rowMeta.offset = pTable->size; + rowMeta.rowSize = rowHead->rowSize; + rowMeta.row = (*(pTable->appTool))(SDB_TYPE_DECODE, NULL, rowHead->data, rowHead->rowSize, NULL); + (*sdbAddIndexFp[pTable->keyType])(pTable->iHandle, rowMeta.row, &rowMeta); + if (pTable->keyType == SDB_KEYTYPE_AUTO) pTable->autoIndex++; + pTable->numOfRows++; + } + } else { // already exists + if (rowHead->id < 0) { // Delete the object + (*sdbDeleteIndexFp[pTable->keyType])(pTable->iHandle, rowHead->data); + (*(pTable->appTool))(SDB_TYPE_DESTROY, pMetaRow, NULL, 0, NULL); + pTable->numOfRows--; + numOfDels++; + } else { // Reset the object TODO: is it possible to merge reset and + // update ?? 
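+        // Replaying a later record for a key that already exists in memory: a
+        // negative id marks a delete (handled above), so a non-negative id here
+        // simply resets the in-memory object to the newer on-disk encoding.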
+ (*(pTable->appTool))(SDB_TYPE_RESET, pMetaRow, rowHead->data, rowHead->rowSize, NULL); + } + numOfDels++; + } + + pTable->size += real_size; + if (pTable->id < abs(rowHead->id)) pTable->id = abs(rowHead->id); + } + + sdbVersion += (pTable->id - oldId); + if (numOfDels > pTable->maxRows / 4) sdbSaveSnapShot(pTable); + + pTable->numOfUpdates = 0; + pTable->updatePos = 0; + + tfree(rowHead); + return 0; + +sdb_exit1: + tfree(rowHead); + return -1; +} + +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, + void *(*appTool)(char, void *, char *, int, int *)) { + SSdbTable *pTable = (SSdbTable *)malloc(sizeof(SSdbTable)); + if (pTable == NULL) return NULL; + memset(pTable, 0, sizeof(SSdbTable)); + + int size = sizeof(SSdbUpdate) * maxRows; + pTable->update = (SSdbUpdate *)malloc(size); + if (pTable->update == NULL) { + free(pTable); + return NULL; + }; + memset(pTable->update, 0, size); + + strcpy(pTable->name, name); + pTable->keyType = keyType; + pTable->maxRows = maxRows; + pTable->maxRowSize = maxRowSize; + pTable->appTool = appTool; + sprintf(pTable->fn, "%s/%s.db", directory, pTable->name); + + if (sdbInitIndexFp[keyType] != NULL) pTable->iHandle = (*sdbInitIndexFp[keyType])(maxRows, sizeof(SRowMeta)); + + pthread_mutex_init(&pTable->mutex, NULL); + + if (sdbInitTableByFile(pTable) < 0) return NULL; + + pTable->dbId = sdbNumOfTables++; + tableList[pTable->dbId] = pTable; + + sdbTrace("table:%s is initialized, numOfRows:%d, numOfTables:%d", pTable->name, pTable->numOfRows, sdbNumOfTables); + + return pTable; +} + +SRowMeta *sdbGetRowMeta(void *handle, void *key) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta * pMeta; + + if (handle == NULL) return NULL; + + pMeta = (*sdbGetIndexFp[pTable->keyType])(pTable->iHandle, key); + + return pMeta; +} + +void *sdbGetRow(void *handle, void *key) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta * pMeta; + + if (handle == NULL) return NULL; + + pthread_mutex_lock(&pTable->mutex); + pMeta = (*sdbGetIndexFp[pTable->keyType])(pTable->iHandle, key); + pthread_mutex_unlock(&pTable->mutex); + + if (pMeta == NULL) return NULL; + + return pMeta->row; +} + +// row here must be encoded string (rowSize > 0) or the object it self (rowSize +// = 0) +int64_t sdbInsertRow(void *handle, void *row, int rowSize) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta rowMeta; + int64_t id = -1; + void * pObj = NULL; + int total_size = 0; + int real_size = 0; + /* char action = SDB_TYPE_INSERT; */ + + if (pTable == NULL) return -1; + + if ((pTable->keyType != SDB_KEYTYPE_AUTO) || *((int64_t *)row)) + if (sdbGetRow(handle, row)) return -1; + + total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); + SRowHead *rowHead = (SRowHead *)malloc(total_size); + if (rowHead == NULL) { + sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + return -1; + } + memset(rowHead, 0, total_size); + + if (rowSize == 0) { // object is created already + pObj = row; + } else { // encoded string, to create object + pObj = (*(pTable->appTool))(SDB_TYPE_DECODE, NULL, row, rowSize, NULL); + } + (*(pTable->appTool))(SDB_TYPE_ENCODE, pObj, rowHead->data, pTable->maxRowSize, &(rowHead->rowSize)); + assert(rowHead->rowSize > 0 && rowHead->rowSize <= pTable->maxRowSize); + + pthread_mutex_lock(&pTable->mutex); + + pTable->id++; + sdbVersion++; + if (pTable->keyType == SDB_KEYTYPE_AUTO) { + // TODO: here need to change + *((uint32_t *)pObj) = ++pTable->autoIndex; + (*(pTable->appTool))(SDB_TYPE_ENCODE, pObj, 
rowHead->data, pTable->maxRowSize, &(rowHead->rowSize)); + } + + real_size = sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM); + + rowHead->delimiter = SDB_DELIMITER; + rowHead->id = pTable->id; + if (taosCalcChecksumAppend(0, (uint8_t *)rowHead, real_size) < 0) { + sdbError("failed to get checksum while inserting, sdb: %s", pTable->name); + pthread_mutex_unlock(&pTable->mutex); + tfree(rowHead); + return -1; + } + + // update in SDB layer + rowMeta.id = pTable->id; + rowMeta.offset = pTable->size; + rowMeta.rowSize = rowHead->rowSize; + rowMeta.row = pObj; + (*sdbAddIndexFp[pTable->keyType])(pTable->iHandle, pObj, &rowMeta); + + /* Update the disk content */ + /* write(pTable->fd, &action, sizeof(action)); */ + /* pTable->size += sizeof(action); */ + write(pTable->fd, rowHead, real_size); + pTable->size += real_size; + sdbFinishCommit(pTable); + + sdbAddIntoUpdateList(pTable, SDB_TYPE_INSERT, rowMeta.row); + + pTable->numOfRows++; + switch (pTable->keyType) { + case SDB_KEYTYPE_STRING: + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + pTable->name, (char *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); + break; + case SDB_KEYTYPE_UINT32: + case SDB_KEYTYPE_AUTO: + sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + pTable->name, *(int32_t *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); + break; + default: + sdbTrace("table:%s, a record is inserted, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + pTable->name, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); + break; + } + + id = rowMeta.id; + + tfree(rowHead); + + pthread_mutex_unlock(&pTable->mutex); + + /* callback function to update the MGMT layer */ + if (id >= 0 && pTable->appTool) (*pTable->appTool)(SDB_TYPE_INSERT, pObj, NULL, 0, NULL); + + return id; +} + +// row here can be object or null-terminated string +int sdbDeleteRow(void *handle, void *row) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta * pMeta = NULL; + int code = -1; + void * pMetaRow = NULL; + SRowHead * rowHead = NULL; + int rowSize = 0; + int total_size = 0; + /* char action = SDB_TYPE_DELETE; */ + + if (pTable == NULL) return -1; + + pMeta = sdbGetRowMeta(handle, row); + if (pMeta == NULL) { + sdbTrace("table:%s, record is not there, delete failed", pTable->name); + return -1; + } + + pMetaRow = pMeta->row; + assert(pMetaRow != NULL); + + switch (pTable->keyType) { + case SDB_KEYTYPE_STRING: + rowSize = strlen((char *)row) + 1; + break; + case SDB_KEYTYPE_UINT32: + rowSize = sizeof(uint32_t); + break; + case SDB_KEYTYPE_AUTO: + rowSize = sizeof(uint64_t); + break; + default: + return -1; + } + + total_size = sizeof(SRowHead) + rowSize + sizeof(TSCKSUM); + rowHead = (SRowHead *)malloc(total_size); + if (rowHead == NULL) { + sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + return -1; + } + memset(rowHead, 0, total_size); + + pthread_mutex_lock(&pTable->mutex); + + pTable->id++; + sdbVersion++; + + rowHead->delimiter = SDB_DELIMITER; + rowHead->rowSize = rowSize; + rowHead->id = -(pTable->id); + memcpy(rowHead->data, row, rowSize); + if (taosCalcChecksumAppend(0, (uint8_t *)rowHead, total_size) < 0) { + sdbError("failed to get checksum while inserting, sdb: %s", pTable->name); + pthread_mutex_unlock(&pTable->mutex); + tfree(rowHead); + return -1; + } + /* write(pTable->fd, &action, sizeof(action)); */ + /* 
pTable->size += sizeof(action); */ + write(pTable->fd, rowHead, total_size); + pTable->size += total_size; + sdbFinishCommit(pTable); + + pTable->numOfRows--; + // TODO: Change the update list here + sdbAddIntoUpdateList(pTable, SDB_TYPE_DELETE, pMetaRow); + switch (pTable->keyType) { + case SDB_KEYTYPE_STRING: + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); + break; + case SDB_KEYTYPE_UINT32: + case SDB_KEYTYPE_AUTO: + sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); + break; + default: + sdbTrace("table:%s, a record is deleted, sdbVersion:%ld id:%ld numOfRows:%d", pTable->name, sdbVersion, + pTable->id, pTable->numOfRows); + break; + } + + // Delete from current layer + (*sdbDeleteIndexFp[pTable->keyType])(pTable->iHandle, row); + + code = 0; + + pthread_mutex_unlock(&pTable->mutex); + + tfree(rowHead); + + // callback function of the delete + if (code == 0 && pTable->appTool) (*pTable->appTool)(SDB_TYPE_DELETE, pMetaRow, NULL, 0, NULL); + + return code; +} + +// row here can be the object or the string info (encoded string) +int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta * pMeta = NULL; + int code = -1; + int total_size = 0; + int real_size = 0; + /* char action = SDB_TYPE_UPDATE; */ + + if (pTable == NULL || row == NULL) return -1; + pMeta = sdbGetRowMeta(handle, row); + if (pMeta == NULL) { + sdbTrace("table:%s, record is not there, update failed", pTable->name); + return -1; + } + + void *pMetaRow = pMeta->row; + assert(pMetaRow != NULL); + + total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); + SRowHead *rowHead = (SRowHead *)malloc(total_size); + if (rowHead == NULL) { + sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + return -1; + } + memset(rowHead, 0, total_size); + + if (!isUpdated) { + (*(pTable->appTool))(SDB_TYPE_UPDATE, pMetaRow, row, updateSize, NULL); // update in upper layer + } + + if (pMetaRow != row) { + memcpy(rowHead->data, row, updateSize); + rowHead->rowSize = updateSize; + } else { + (*(pTable->appTool))(SDB_TYPE_ENCODE, pMetaRow, rowHead->data, pTable->maxRowSize, &(rowHead->rowSize)); + } + + real_size = sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM); + ; + + pthread_mutex_lock(&pTable->mutex); + + pTable->id++; + sdbVersion++; + + // write to the new position + rowHead->delimiter = SDB_DELIMITER; + rowHead->id = pTable->id; + if (taosCalcChecksumAppend(0, (uint8_t *)rowHead, real_size) < 0) { + sdbError("failed to get checksum, sdb: %s id: %d", pTable->name, rowHead->id); + pthread_mutex_unlock(&pTable->mutex); + tfree(rowHead); + return -1; + } + /* write(pTable->fd, &action, sizeof(action)); */ + /* pTable->size += sizeof(action); */ + write(pTable->fd, rowHead, real_size); + + pMeta->id = pTable->id; + pMeta->offset = pTable->size; + pMeta->rowSize = rowHead->rowSize; + pTable->size += real_size; + + sdbFinishCommit(pTable); + + switch (pTable->keyType) { + case SDB_KEYTYPE_STRING: + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); + break; + case SDB_KEYTYPE_UINT32: + case SDB_KEYTYPE_AUTO: + sdbTrace("table:%s, a record is updated:%d, sdbVersion:%ld id:%ld numOfRows:%d", + pTable->name, *(int32_t *)row, 
sdbVersion, pTable->id, pTable->numOfRows); + break; + default: + sdbTrace("table:%s, a record is updated, sdbVersion:%ld id:%ld numOfRows:%d", pTable->name, sdbVersion, + pTable->id, pTable->numOfRows); + break; + } + + sdbAddIntoUpdateList(pTable, SDB_TYPE_UPDATE, pMetaRow); + code = 0; + + pthread_mutex_unlock(&pTable->mutex); + + tfree(rowHead); + + return code; +} + +// row here must be the instruction string +int sdbBatchUpdateRow(void *handle, void *row, int rowSize) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta * pMeta = NULL; + int total_size = 0; + /* char action = SDB_TYPE_BATCH_UPDATE; */ + + if (pTable == NULL || row == NULL || rowSize <= 0) return -1; + pMeta = sdbGetRowMeta(handle, row); + if (pMeta == NULL) { + sdbTrace("table: %s, record is not there, batch update failed", pTable->name); + return -1; + } + + void *pMetaRow = pMeta->row; + assert(pMetaRow != NULL); + + total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); + SRowHead *rowHead = (SRowHead *)malloc(total_size); + if (rowHead == NULL) { + sdbError("failed to allocate row head memory, sdb: %s", pTable->name); + return -1; + } + + pthread_mutex_lock(&pTable->mutex); + + (*(pTable->appTool))(SDB_TYPE_BEFORE_BATCH_UPDATE, pMetaRow, NULL, 0, NULL); + + void *next_row = pMetaRow; + while (next_row != NULL) { + pTable->id++; + sdbVersion++; + + void *last_row = next_row; + next_row = (*(pTable->appTool))(SDB_TYPE_BATCH_UPDATE, last_row, (char *)row, rowSize, 0); + memset(rowHead, 0, sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM)); + + // update in current layer + pMeta->id = pTable->id; + pMeta->offset = pTable->size; + + // write to disk + rowHead->delimiter = SDB_DELIMITER; + rowHead->id = pMeta->id; + (*(pTable->appTool))(SDB_TYPE_ENCODE, last_row, rowHead->data, pTable->maxRowSize, &(rowHead->rowSize)); + taosCalcChecksumAppend(0, (uint8_t *)rowHead, sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM)); + pMeta->rowSize = rowHead->rowSize; + lseek(pTable->fd, pTable->size, SEEK_SET); + write(pTable->fd, rowHead, sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM)); + pTable->size += (sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM)); + + sdbAddIntoUpdateList(pTable, SDB_TYPE_UPDATE, last_row); + + if (next_row != NULL) { + pMeta = sdbGetRowMeta(handle, next_row); + } + } + + sdbFinishCommit(pTable); + + (*(pTable->appTool))(SDB_TYPE_AFTER_BATCH_UPDATE, pMetaRow, NULL, 0, NULL); + + pthread_mutex_unlock(&pTable->mutex); + + tfree(rowHead); + + return 0; +} + +void sdbCloseTable(void *handle) { + SSdbTable *pTable = (SSdbTable *)handle; + void * pNode = NULL; + void * row; + + if (pTable == NULL) return; + + while (1) { + pNode = sdbFetchRow(handle, pNode, &row); + if (row == NULL) break; + (*(pTable->appTool))(SDB_TYPE_DESTROY, row, NULL, 0, NULL); + } + + if (sdbCleanUpIndexFp[pTable->keyType]) (*sdbCleanUpIndexFp[pTable->keyType])(pTable->iHandle); + + if (pTable->fd) tclose(pTable->fd); + + pthread_mutex_destroy(&pTable->mutex); + + sdbNumOfTables--; + sdbTrace("table:%s is closed, id:%ld numOfTables:%d", pTable->name, pTable->id, sdbNumOfTables); + + tfree(pTable->update); + tfree(pTable); +} + +void sdbResetTable(SSdbTable *pTable) { + /* SRowHead rowHead; */ + SRowMeta rowMeta; + int bytes; + int total_size = 0; + int real_size = 0; + int64_t oldId; + SRowHead *rowHead = NULL; + void * pMetaRow = NULL; + + oldId = pTable->id; + if (sdbOpenSdbFile(pTable) < 0) return; + + total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); + rowHead = (SRowHead 
*)malloc(total_size); + if (rowHead == NULL) { + return; + } + + while (1) { + memset(rowHead, 0, total_size); + + bytes = read(pTable->fd, rowHead, sizeof(SRowHead)); + if (bytes < 0) { + sdbError("failed to read sdb file: %s", pTable->fn); + tfree(rowHead); + return; + } + + if (bytes == 0) break; + + if (bytes < sizeof(SRowHead) || rowHead->delimiter != SDB_DELIMITER) { + pTable->size++; + lseek(pTable->fd, -(bytes - 1), SEEK_CUR); + continue; + } + + if (rowHead->rowSize < 0 || rowHead->rowSize > pTable->maxRowSize) { + sdbError("error row size in sdb file: %s rowSize: %d maxRowSize: %d", pTable->fn, rowHead->rowSize, + pTable->maxRowSize); + pTable->size += sizeof(SRowHead); + continue; + } + + bytes = read(pTable->fd, rowHead->data, rowHead->rowSize + sizeof(TSCKSUM)); + if (bytes < rowHead->rowSize + sizeof(TSCKSUM)) { + sdbError("failed to read sdb file: %s id: %d rowSize: %d", pTable->fn, rowHead->id, rowHead->rowSize); + break; + } + + real_size = sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM); + if (!taosCheckChecksumWhole((uint8_t *)rowHead, real_size)) { + sdbError("error sdb checksum, sdb: %s id: %d, skip", pTable->name, rowHead->id); + pTable->size += real_size; + continue; + } + + if (abs(rowHead->id) > oldId) { // not operated + pMetaRow = sdbGetRow(pTable, rowHead->data); + if (pMetaRow == NULL) { // New object + if (rowHead->id < 0) { + sdbError("error sdb negative id: %d, sdb: %s, skip", rowHead->id, pTable->name); + } else { + rowMeta.id = rowHead->id; + // TODO: Get rid of the rowMeta.offset and rowSize + rowMeta.offset = pTable->size; + rowMeta.rowSize = rowHead->rowSize; + rowMeta.row = (*(pTable->appTool))(SDB_TYPE_DECODE, NULL, rowHead->data, rowHead->rowSize, NULL); + (*sdbAddIndexFp[pTable->keyType])(pTable->iHandle, rowMeta.row, &rowMeta); + pTable->numOfRows++; + + (*pTable->appTool)(SDB_TYPE_INSERT, rowMeta.row, NULL, 0, NULL); + } + } else { // already exists + if (rowHead->id < 0) { // Delete the object + (*sdbDeleteIndexFp[pTable->keyType])(pTable->iHandle, rowHead->data); + (*(pTable->appTool))(SDB_TYPE_DESTROY, pMetaRow, NULL, 0, NULL); + pTable->numOfRows--; + } else { // update the object + (*(pTable->appTool))(SDB_TYPE_UPDATE, pMetaRow, rowHead->data, rowHead->rowSize, NULL); + } + } + } + + pTable->size += real_size; + if (pTable->id < abs(rowHead->id)) pTable->id = abs(rowHead->id); + } + + sdbVersion += (pTable->id - oldId); + pTable->numOfUpdates = 0; + pTable->updatePos = 0; + + tfree(rowHead); + + sdbTrace("table:%s is updated, sdbVerion:%ld id:%ld", pTable->name, sdbVersion, pTable->id); +} + +// TODO: A problem here : use snapshot file to sync another node will cause +// problem +void sdbSaveSnapShot(void *handle) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta * pMeta; + void * pNode = NULL; + int total_size = 0; + int real_size = 0; + int size = 0; + int numOfRows = 0; + uint32_t sdbEcommit = SDB_ENDCOMMIT; + char * dirc = NULL; + char * basec = NULL; + /* char action = SDB_TYPE_INSERT; */ + + if (pTable == NULL) return; + + sdbTrace("Table:%s, save the snapshop", pTable->name); + + char fn[128] = "\0"; + dirc = strdup(pTable->fn); + basec = strdup(pTable->fn); + sprintf(fn, "%s/.%s", dirname(dirc), basename(basec)); + int fd = open(fn, O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + tfree(dirc); + tfree(basec); + + total_size = sizeof(SRowHead) + pTable->maxRowSize + sizeof(TSCKSUM); + SRowHead *rowHead = (SRowHead *)malloc(total_size); + if (rowHead == NULL) { + sdbError("failed to allocate memory while saving SDB 
snapshot, sdb: %s", pTable->name); + return; + } + memset(rowHead, 0, size); + + // Write the header + write(fd, &(pTable->header), sizeof(SSdbHeader)); + size += sizeof(SSdbHeader); + write(fd, &sdbEcommit, sizeof(sdbEcommit)); + size += sizeof(sdbEcommit); + + while (1) { + pNode = (*sdbFetchRowFp[pTable->keyType])(pTable->iHandle, pNode, (void **)&pMeta); + if (pMeta == NULL) break; + + rowHead->delimiter = SDB_DELIMITER; + rowHead->id = pMeta->id; + (*(pTable->appTool))(SDB_TYPE_ENCODE, pMeta->row, rowHead->data, pTable->maxRowSize, &(rowHead->rowSize)); + real_size = sizeof(SRowHead) + rowHead->rowSize + sizeof(TSCKSUM); + if (taosCalcChecksumAppend(0, (uint8_t *)rowHead, real_size) < 0) { + sdbError("failed to get checksum while save sdb %s snapshot", pTable->name); + tfree(rowHead); + return; + } + + /* write(fd, &action, sizeof(action)); */ + /* size += sizeof(action); */ + write(fd, rowHead, real_size); + size += real_size; + write(fd, &sdbEcommit, sizeof(sdbEcommit)); + size += sizeof(sdbEcommit); + numOfRows++; + } + + tfree(rowHead); + + // Remove the old file + tclose(pTable->fd); + remove(pTable->fn); + // Rename the .sdb.db file to sdb.db file + rename(fn, pTable->fn); + pTable->fd = fd; + pTable->size = size; + pTable->numOfRows = numOfRows; + + fdatasync(pTable->fd); +} + +void *sdbFetchRow(void *handle, void *pNode, void **ppRow) { + SSdbTable *pTable = (SSdbTable *)handle; + SRowMeta * pMeta; + + *ppRow = NULL; + if (pTable == NULL) return NULL; + + pNode = (*sdbFetchRowFp[pTable->keyType])(pTable->iHandle, pNode, (void **)&pMeta); + if (pMeta == NULL) return NULL; + + *ppRow = pMeta->row; + + return pNode; +} + +int64_t sdbGetId(void *handle) { return ((SSdbTable *)handle)->id; } + +int64_t sdbGetNumOfRows(void *handle) { return ((SSdbTable *)handle)->numOfRows; } diff --git a/src/sdb/src/sdbstr.c b/src/sdb/src/sdbstr.c new file mode 100644 index 000000000000..e873507d237a --- /dev/null +++ b/src/sdb/src/sdbstr.c @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +char* sdbStatusStr[] = {"offline", "unsynced", "syncing", "serving", "null"}; + +char* sdbRoleStr[] = {"unauthed", "undecided", "master", "slave", "null"}; diff --git a/src/system/CMakeLists.txt b/src/system/CMakeLists.txt new file mode 100755 index 000000000000..6a7e431978f8 --- /dev/null +++ b/src/system/CMakeLists.txt @@ -0,0 +1,22 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc ../taos/inc ../client/inc ../modules/http/inc ../modules/monitor/inc ../../deps/inc) + +ADD_EXECUTABLE(taosd ${SRC}) +TARGET_LINK_LIBRARIES(taosd taos_static trpc tutil sdb monitor pthread http z) + +SET(PREPARE_ENV_CMD "prepare_env_cmd") +SET(PREPARE_ENV_TARGET "prepare_env_target") +ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD} + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory ${TESTS_OUTPUT_DIR}/cfg/ + COMMAND ${CMAKE_COMMAND} -E make_directory ${TESTS_OUTPUT_DIR}/log/ + COMMAND ${CMAKE_COMMAND} -E make_directory ${TESTS_OUTPUT_DIR}/data/ + COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TESTS_OUTPUT_DIR}/data > ${TESTS_OUTPUT_DIR}/cfg/taos.cfg + COMMAND ${CMAKE_COMMAND} -E echo logDir ${TESTS_OUTPUT_DIR}/log >> ${TESTS_OUTPUT_DIR}/cfg/taos.cfg + COMMENT "prepare taosd environment") +ADD_CUSTOM_TARGET(${PREPARE_ENV_TARGET} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${PREPARE_ENV_CMD}) + diff --git a/src/system/inc/dnodeSystem.h b/src/system/inc/dnodeSystem.h new file mode 100644 index 000000000000..22a90e76edd2 --- /dev/null +++ b/src/system/inc/dnodeSystem.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_DNODESYSTEM_H +#define TDENGINE_DNODESYSTEM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +enum _module { TSDB_MOD_HTTP, TSDB_MOD_MONITOR, TSDB_MOD_MAX }; + +typedef struct { + char *name; + int (*initFp)(); + void (*cleanUpFp)(); + int (*startFp)(); + void (*stopFp)(); + int num; + int curNum; + int equalVnodeNum; +} SModule; + +extern uint32_t tsModuleStatus; +extern SModule tsModule[]; +extern pthread_mutex_t dmutex; + +void dnodeCleanUpSystem(); +int dnodeInitSystem(); + +int vnodeInitSystem(); + +int mgmtInitSystem(); +void mgmtCleanUpSystem(); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/system/inc/mgmt.h b/src/system/inc/mgmt.h new file mode 100644 index 000000000000..7d0a2fd03a88 --- /dev/null +++ b/src/system/inc/mgmt.h @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_MGMT_H +#define TDENGINE_MGMT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +#include "sdb.h" +#include "tglobalcfg.h" +#include "thash.h" +#include "tidpool.h" +#include "tlog.h" +#include "tmempool.h" +#include "trpc.h" +#include "tsdb.h" +#include "tsdb.h" +#include "tskiplist.h" +#include "tsocket.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +// internal globals +extern char version[]; +extern void *mgmtTmr; +extern void *mgmtQhandle; +extern void *mgmtTranQhandle; +extern int mgmtShellConns; +extern int mgmtDnodeConns; +extern char mgmtDirectory[]; + +enum _TSDB_VG_STATUS { + TSDB_VG_STATUS_READY, + TSDB_VG_STATUS_IN_PROGRESS, + TSDB_VG_STATUS_COMMITLOG_INIT_FAILED, + TSDB_VG_STATUS_INIT_FAILED, + TSDB_VG_STATUS_FULL +}; + +enum _TSDB_DB_STATUS { TSDB_DB_STATUS_READY, TSDB_DB_STATUS_DROPPING, TSDB_DB_STATUS_DROP_FROM_SDB }; + +enum _TSDB_VN_STATUS { TSDB_VN_STATUS_READY, TSDB_VN_STATUS_DROPPING }; + +typedef struct { + uint32_t privateIp; + int32_t sid; + uint32_t moduleStatus; + int32_t openVnodes; + int32_t numOfVnodes; + int32_t numOfFreeVnodes; + int64_t createdTime; + uint32_t publicIp; + int32_t status; + uint32_t lastAccess; + uint32_t rebootTimes; + uint32_t lastReboot; // time stamp for last reboot + uint16_t numOfCores; // from dnode status msg + uint8_t alternativeRole; // from dnode status msg, 0-any, 1-mgmt, 2-dnode + uint8_t reserveStatus; + float memoryAvailable; // from dnode status msg + float diskAvailable; // from dnode status msg + int32_t bandwidthMb; // config by user + int16_t cpuAvgUsage; // calc from sys.cpu + int16_t memoryAvgUsage; // calc from sys.mem + int16_t diskAvgUsage; // calc from sys.disk + int16_t bandwidthUsage; // calc from sys.band + uint32_t rack; + uint16_t idc; + uint16_t slot; + int32_t customScore; // config by user + float lbScore; // calc in balance function + int16_t lbState; // set in balance function + int16_t lastAllocVnode; // increase while create vnode + SVnodeLoad vload[TSDB_MAX_VNODES]; + char reserved[16]; + char updateEnd[1]; +} SDnodeObj; + +typedef struct { + uint32_t ip; + uint32_t publicIp; + int32_t vnode; +} SVnodeGid; + +typedef struct { + int32_t sid; + int32_t vgId; // vnode group ID +} SMeterGid; + +typedef struct _tab_obj { + char meterId[TSDB_METER_ID_LEN + 1]; + uint64_t uid; + SMeterGid gid; + + int32_t sversion; // schema version + int64_t createdTime; + int32_t numOfTags; // for metric + int32_t numOfMeters; // for metric + int32_t numOfColumns; + int32_t schemaSize; + short nextColId; + char meterType : 4; + char status : 3; + char isDirty : 1; // if the table change tag column 1 value + char reserved[15]; + char updateEnd[1]; + + pthread_rwlock_t rwLock; + tSkipList * pSkipList; + struct _tab_obj *pHead; // for metric, a link list for all meters created + // according to this metric + char *pTagData; // TSDB_METER_ID_LEN(metric_name)+ + // tags_value1/tags_value2/tags_value3 + struct _tab_obj *prev, *next; + char * pSql; // pointer to SQL, for SC, null-terminated string + char * pReserve1; + char * pReserve2; + char * schema; + // SSchema schema[]; +} STabObj; + +typedef struct _vg_obj { + uint32_t vgId; + char dbName[TSDB_DB_NAME_LEN]; + int64_t createdTime; + uint64_t lastCreate; + uint64_t lastRemove; + int32_t numOfVnodes; + SVnodeGid vnodeGid[TSDB_VNODES_SUPPORT]; + int32_t numOfMeters; + 
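/* presumably load-balancing bookkeeping for this vnode group (cf. lbScore/lbState in SDnodeObj above) */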
int32_t lbIp; + int32_t lbTime; + int8_t lbState; + char reserved[16]; + char updateEnd[1]; + struct _vg_obj *prev, *next; + void * idPool; + STabObj ** meterList; +} SVgObj; + +typedef struct _db_obj { + char name[TSDB_DB_NAME_LEN + 1]; + int64_t createdTime; + SDbCfg cfg; + int32_t numOfVgroups; + int32_t numOfTables; + int32_t numOfMetrics; + uint8_t vgStatus; + uint8_t dropStatus; + char reserved[16]; + char updateEnd[1]; + + STabObj * pMetric; + struct _db_obj *prev, *next; + SVgObj * pHead; // empty vgroup first + SVgObj * pTail; // empty vgroup end + void * vgTimer; +} SDbObj; + +typedef struct _user_obj { + char user[TSDB_USER_LEN + 1]; + char pass[TSDB_KEY_LEN]; + char acct[TSDB_USER_LEN]; + int64_t createdTime; + char superAuth : 1; + char writeAuth : 1; + char reserved[16]; + char updateEnd[1]; + struct _user_obj *prev, *next; +} SUserObj; + +typedef struct { + int32_t numOfUsers; + int32_t numOfDbs; + int32_t numOfTimeSeries; + int32_t numOfPointsPerSecond; + int32_t numOfConns; + int32_t numOfQueries; + int32_t numOfStreams; + int64_t totalStorage; // Total storage wrtten from this account + int64_t compStorage; // Compressed storage on disk + int64_t queryTime; + int64_t totalPoints; + int64_t inblound; + int64_t outbound; + TSKEY sKey; + char accessState; // Checked by mgmt heartbeat message +} SAcctInfo; + +typedef struct { + char user[TSDB_USER_LEN + 1]; + char pass[TSDB_KEY_LEN]; + SAcctCfg cfg; + int32_t acctId; + int64_t createdTime; + char reserved[15]; + char updateEnd[1]; + SAcctInfo acctInfo; + + SDbObj * pHead; + SUserObj * pUser; + struct _connObj *pConn; + pthread_mutex_t mutex; +} SAcctObj; + +typedef struct _connObj { + SAcctObj * pAcct; + SDbObj * pDb; + SUserObj * pUser; + char user[TSDB_USER_LEN]; + char db[TSDB_METER_ID_LEN]; + uint64_t stime; // login time + char superAuth : 1; // super user flag + char writeAuth : 1; // write flag + char killConnection : 1; // kill the connection flag + uint32_t queryId; // query ID to be killed + uint32_t streamId; // stream ID to be killed + uint32_t ip; // shell IP + short port; // shell port + void * thandle; + SQList * pQList; // query list + SSList * pSList; // stream list + uint64_t qhandle; + struct _connObj *prev, *next; +} SConnObj; + +typedef struct { + char spi; + char encrypt; + char secret[TSDB_KEY_LEN]; + char cipheringKey[TSDB_KEY_LEN]; +} SSecInfo; + +typedef struct { + char type; + void * pNode; + short numOfColumns; + int rowSize; + int numOfRows; + int numOfReads; + short offset[TSDB_MAX_COLUMNS]; + short bytes[TSDB_MAX_COLUMNS]; + void * signature; + uint16_t payloadLen; /* length of payload*/ + char payload[]; /* payload for wildcard match in show tables */ +} SShowObj; + +extern SAcctObj acctObj; +extern SDnodeObj dnodeObj; + +// dnodeInt API +int mgmtInitDnodeInt(); +void mgmtCleanUpDnodeInt(); +int mgmtSendCreateMsgToVnode(STabObj *pMeter, int vnode); +int mgmtSendRemoveMeterMsgToVnode(STabObj *pMeter, int vnode); +int mgmtSendVPeersMsg(SVgObj *pVgroup, SDbObj *pDb); +int mgmtSendFreeVnodeMsg(int vnode); + +// shell API +int mgmtInitShell(); +void mgmtCleanUpShell(); +int mgmtRetriveUserAuthInfo(char *user, char *spi, char *encrypt, uint8_t *secret, uint8_t *ckey); + +// acct API +int mgmtAddDbIntoAcct(SAcctObj *pAcct, SDbObj *pDb); +int mgmtRemoveDbFromAcct(SAcctObj *pAcct, SDbObj *pDb); +int mgmtAddUserIntoAcct(SAcctObj *pAcct, SUserObj *pUser); +int mgmtRemoveUserFromAcct(SAcctObj *pAcct, SUserObj *pUser); +int mgmtAddConnIntoAcct(SConnObj *pConn); +int mgmtRemoveConnFromAcct(SConnObj *pConn); 
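/*
 * Editorial note, not part of this patch: the "meta"/"retrieve" function pairs declared in this
 * header (e.g. mgmtGetUserMeta/mgmtRetrieveUsers below) appear to share one paging contract: the
 * meta call fills the column schema and total row count into the SShowObj, and the retrieve call
 * is then invoked repeatedly, copying at most `rows` rows into `data` and returning how many were
 * written. The sketch below is only a hypothetical illustration of that contract; the exact field
 * usage (numOfRows/numOfReads/rowSize) is an assumption, not taken from this commit, and the block
 * is kept under #if 0 so it is never compiled.
 */
#if 0
static int mgmtRetrieveExample(SShowObj *pShow, char *data, int rows, SConnObj *pConn) {
  int numOfWritten = 0;
  while (numOfWritten < rows && pShow->numOfReads < pShow->numOfRows) {
    char *pRow = data + (size_t)numOfWritten * pShow->rowSize;  // next output slot
    memset(pRow, 0, pShow->rowSize);                            // encode one row's columns here
    pShow->numOfReads++;                                        // track paging progress
    numOfWritten++;
  }
  return numOfWritten;                                          // rows actually written this call
}
#endif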
+void mgmtCheckAcct(); +int64_t mgmtGetAcctStatistic(SAcctObj *pAcct); + +// user API +int mgmtInitUsers(); +SUserObj *mgmtGetUser(char *name); +int mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass); +int mgmtDropUser(SAcctObj *pAcct, char *name); +int mgmtUpdateUser(SUserObj *pUser); +int mgmtGetUserMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveUsers(SShowObj *pShow, char *data, int rows, SConnObj *pConn); +void mgmtCleanUpUsers(); + +// metric API +int mgmtAddMeterIntoMetric(STabObj *pMetric, STabObj *pMeter); +int mgmtRemoveMeterFromMetric(STabObj *pMetric, STabObj *pMeter); +int mgmtGetMetricMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +// DB API +int mgmtInitDbs(); +int mgmtUpdateDb(SDbObj *pDb); +SDbObj *mgmtGetDb(char *db); +SDbObj *mgmtGetDbByMeterId(char *db); +int mgmtCreateDb(SAcctObj *pAcct, SCreateDbMsg *pCreate); +int mgmtDropDbByName(SAcctObj *pAcct, char *name); +int mgmtDropDb(SDbObj *pDb); +/* void mgmtMonitorDbDrop(void *unused); */ +void mgmtMonitorDbDrop(void *unused, void *unusedt); +int mgmtAlterDb(SAcctObj *pAcct, SAlterDbMsg *pAlter); +int mgmtUseDb(SConnObj *pConn, char *name); +int mgmtAddVgroupIntoDb(SDbObj *pDb, SVgObj *pVgroup); +int mgmtAddVgroupIntoDbTail(SDbObj *pDb, SVgObj *pVgroup); +int mgmtRemoveVgroupFromDb(SDbObj *pDb, SVgObj *pVgroup); +int mgmtAddMetricIntoDb(SDbObj *pDb, STabObj *pMetric); +int mgmtRemoveMetricFromDb(SDbObj *pDb, STabObj *pMetric); +int mgmtMoveVgroupToTail(SDbObj *pDb, SVgObj *pVgroup); +int mgmtMoveVgroupToHead(SDbObj *pDb, SVgObj *pVgroup); +int mgmtGetDbMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveDbs(SShowObj *pShow, char *data, int rows, SConnObj *pConn); +void mgmtCleanUpDbs(); + +// vGroup API +int mgmtInitVgroups(); +SVgObj *mgmtGetVgroup(int vgId); +SVgObj *mgmtCreateVgroup(SDbObj *pDb); +int mgmtDropVgroup(SDbObj *pDb, SVgObj *pVgroup); +void mgmtSetVgroupIdPool(); +int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveVgroups(SShowObj *pShow, char *data, int rows, SConnObj *pConn); +void mgmtCleanUpVgroups(); +SAcctObj *mgmtGetVgroupAcct(int vgId); + +// meter API +int mgmtInitMeters(); +STabObj *mgmtGetMeter(char *meterId); +STabObj *mgmtGetMeterInfo(char *src, char *tags[]); +int mgmtRetrieveMetricMeta(void *thandle, char **pStart, STabObj *pMetric, SMetricMetaMsg *pInfo); +int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate); +int mgmtDropMeter(SDbObj *pDb, char *meterId, int ignore); +int mgmtAlterMeter(SDbObj *pDb, SAlterTableMsg *pAlter); +int mgmtGetMeterMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn); +void mgmtCleanUpMeters(); +SSchema *mgmtGetMeterSchema(STabObj *pMeter); // get schema for a meter + +bool mgmtMeterCreateFromMetric(STabObj *pMeterObj); +bool mgmtIsMetric(STabObj *pMeterObj); +bool mgmtIsNormalMeter(STabObj *pMeterObj); + +int mgmtGetDnodeMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveDnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn); +void mgmtSetDnodeVgid(int vnode, int vgId); +void mgmtUnSetDnodeVgid(int vnode); +void mgmtSetDnodeMaxVnodes(SDnodeObj *pDnode); + +int mgmtGetModuleMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveModules(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +int mgmtGetConfigMeta(SMeterMeta 
*pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveConfigs(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +int mgmtGetConnsMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveConns(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +int mgmtGetScoresMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int mgmtRetrieveScores(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +int grantGetGrantsMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +int grantRetrieveGrants(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +void mgmtSetModuleInDnode(SDnodeObj *pDnode, int moduleType); +int mgmtUnSetModuleInDnode(SDnodeObj *pDnode, int moduleType); + +extern int (*mgmtGetMetaFp[])(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); +extern int (*mgmtRetrieveFp[])(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +extern int tsVgUpdateSize; +extern int tsDbUpdateSize; +extern int tsUserUpdateSize; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_MGMT_H diff --git a/src/system/inc/mgmtProfile.h b/src/system/inc/mgmtProfile.h new file mode 100644 index 000000000000..38732c47930a --- /dev/null +++ b/src/system/inc/mgmtProfile.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_MGMTPROFILE_H +#define TDENGINE_MGMTPROFILE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mgmt.h" + +int mgmtGetQueryMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); + +int mgmtGetStreamMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn); + +int mgmtRetrieveQueries(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +int mgmtRetrieveStreams(SShowObj *pShow, char *data, int rows, SConnObj *pConn); + +int mgmtSaveQueryStreamList(char *cont, int contLen, SConnObj *pConn); + +int mgmtKillQuery(char *qidstr, SConnObj *pConn); + +int mgmtKillStream(char *qidstr, SConnObj *pConn); + +int mgmtKillConnection(char *qidstr, SConnObj *pConn); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_MGMTPROFILE_H diff --git a/src/system/inc/mgmtSystem.h b/src/system/inc/mgmtSystem.h new file mode 100644 index 000000000000..818c2a6ce5b8 --- /dev/null +++ b/src/system/inc/mgmtSystem.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_MGMTSYSTEM_H +#define TDENGINE_MGMTSYSTEM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +void mgmtCleanUpSystem(); + +void mgmtPrintSystemInfo(); + +int32_t mgmtInitSystem(); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_MGMTSYSTEM_H diff --git a/src/system/inc/vnode.h b/src/system/inc/vnode.h new file mode 100644 index 000000000000..b9683d80753d --- /dev/null +++ b/src/system/inc/vnode.h @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_H +#define TDENGINE_VNODE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include + +#include "tglobalcfg.h" +#include "tidpool.h" +#include "tlog.h" +#include "tmempool.h" +#include "trpc.h" +#include "tsclient.h" +#include "tsdb.h" +#include "tsdb.h" +#include "tsocket.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" +#include "vnodeCache.h" +#include "vnodeFile.h" +#include "vnodeShell.h" + +#define TSDB_FILE_HEADER_LEN 512 +#define TSDB_FILE_HEADER_VERSION_SIZE 32 +#define TSDB_CACHE_POS_BITS 13 +#define TSDB_CACHE_POS_MASK 0x1FFF + +#define TSDB_ACTION_INSERT 0 +#define TSDB_ACTION_IMPORT 1 +#define TSDB_ACTION_DELETE 2 +#define TSDB_ACTION_UPDATE 3 +#define TSDB_ACTION_MAX 4 + +enum _data_source { + TSDB_DATA_SOURCE_METER, + TSDB_DATA_SOURCE_VNODE, + TSDB_DATA_SOURCE_SHELL, + TSDB_DATA_SOURCE_QUEUE, + TSDB_DATA_SOURCE_LOG, +}; + +enum _sync_cmd { + TSDB_SYNC_CMD_FILE, + TSDB_SYNC_CMD_CACHE, + TSDB_SYNC_CMD_CREATE, + TSDB_SYNC_CMD_REMOVE, +}; + +enum _meter_state { + TSDB_METER_STATE_READY, + TSDB_METER_STATE_IMPORTING, + TSDB_METER_STATE_UPDATING, + TSDB_METER_STATE_DELETING, + TSDB_METER_STATE_DELETED, +}; + +typedef struct { + int64_t offset : 48; + int64_t length : 16; +} SMeterObjHeader; + +typedef struct { + int64_t len; + char data[]; +} SData; + +typedef struct { + int vnode; + SVnodeCfg cfg; + // SDiskDesc tierDisk[TSDB_MAX_TIER]; + SVPeerDesc vpeers[TSDB_VNODES_SUPPORT]; + SVnodeStatisticInfo vnodeStatistic; + char selfIndex; + char status; + char accessState; // Vnode access state, Readable/Writable + char syncStatus; + char commitInProcess; + pthread_t commitThread; + TSKEY firstKey; // minimum key uncommitted, it may be smaller than + // commitFirstKey + TSKEY commitFirstKey; // minimum key for a commit file, it shall be + // xxxx00000, calculated from fileId + TSKEY commitLastKey; // maximum key for a commit file, it shall be xxxx99999, + // calculated fromm fileId + int commitFileId; + TSKEY lastCreate; + TSKEY lastRemove; + TSKEY lastKey; // last key for the whole vnode, updated by every insert + // operation + uint64_t version; + + int streamRole; + int numOfStreams; + void *streamTimer; + + TSKEY lastKeyOnFile; // maximum key on the last file, is shall be xxxx99999 + int fileId; + int badFileId; + int numOfFiles; + int maxFiles; + int maxFile1; + int maxFile2; + int nfd; // temp head file FD + int hfd; // head file FD + int lfd; // last file FD + int tfd; 
// temp last file FD + int dfd; // data file FD + int64_t dfSize; + int64_t lfSize; + uint64_t * fmagic; // hold magic number for each file + char cfn[TSDB_FILENAME_LEN]; + char nfn[TSDB_FILENAME_LEN]; + char lfn[TSDB_FILENAME_LEN]; // last file name + char tfn[TSDB_FILENAME_LEN]; // temp last file name + pthread_mutex_t vmutex; + + int logFd; + char * pMem; + char * pWrite; + pthread_mutex_t logMutex; + char logFn[TSDB_FILENAME_LEN]; + char logOFn[TSDB_FILENAME_LEN]; + int64_t mappingSize; + int64_t mappingThreshold; + + void * commitTimer; + void ** meterList; + void * pCachePool; + void * pQueue; + pthread_t thread; + int peersOnline; + int shellConns; + int meterConns; + struct _qinfo *pQInfoList; + + TAOS * dbConn; + SMeterObjHeader *meterIndex; +} SVnodeObj; + +typedef struct SColumn { + short colId; + short bytes; + char type; +} SColumn; + +typedef struct _meter_obj { + uint64_t uid; + char meterId[TSDB_METER_ID_LEN]; + int sid; + short vnode; + short numOfColumns; + short bytesPerPoint; + short maxBytes; + int32_t pointsPerBlock; + int32_t pointsPerFileBlock; + int freePoints; + TSKEY lastKey; // updated by insert operation + TSKEY lastKeyOnFile; // last key on file, updated by commit action + TSKEY timeStamp; // delete or added time + uint64_t commitCount; + int32_t sversion; + short sqlLen; + char searchAlgorithm : 4; + char compAlgorithm : 4; + char state : 5; // deleted or added, 1: added + char status : 3; // 0: ok, 1: stop stream computing + + char reserved[16]; + int numOfQueries; + char * pSql; + void * pStream; + void * pCache; + SColumn *schema; +} SMeterObj; + +typedef struct { + char type; + char pversion; // protocol version + char action; // insert, import, delete, update + int32_t sversion; // only for insert + int32_t sid; + int32_t len; + uint64_t lastVersion; // latest version + char cont[]; +} SVMsgHeader; + +/* + * the value of QInfo.signature is used to denote that a query is executing, it + * isn't safe + * to release QInfo yet. + * The release operations will be blocked in a busy-waiting until the query + * operation reach a safepoint. + * Then it will reset the signature in a atomic operation, followed by release + * operation. + * Only the QInfo.signature == QInfo, this structure can be released safely. 
+ */ +#define TSDB_QINFO_QUERY_FLAG 0x1 +#define TSDB_QINFO_RESET_SIG(x) ((x)->signature = (uint64_t)(x)) +#define TSDB_QINFO_SET_QUERY_FLAG(x) \ + __sync_val_compare_and_swap(&((x)->signature), (uint64_t)(x), TSDB_QINFO_QUERY_FLAG); + +// live lock: wait for query reaching a safe-point, release all resources +// belongs to this query +#define TSDB_WAIT_TO_SAFE_DROP_QINFO(x) \ + { \ + while (__sync_val_compare_and_swap(&((x)->signature), (x), 0) == TSDB_QINFO_QUERY_FLAG) { \ + taosMsleep(1); \ + } \ + } + +struct tSQLBinaryExpr; + +typedef struct SColumnFilter { + SColumnFilterMsg data; + int16_t colIdx; + int16_t colIdxInBuf; + + /* + * 0: denotes if its is required in the first round of scan of data block + * 1: denotes if its is required in the secondary scan + */ + int16_t req[2]; +} SColumnFilter; + +typedef bool (*__filter_func_t)(SColumnFilter *pFilter, char *val1, char *val2); + +typedef struct SColumnFilterInfo { + SColumnFilter pFilter; + int16_t elemSize; // element size in pData + __filter_func_t fp; // filter function + char * pData; // raw data, as the input for filter function +} SColumnFilterInfo; + +typedef struct { + short numOfCols; + SOrderVal order; + char keyIsMet; // if key is met, it will be set + char over; + int fileId; // only for query in file + int hfd; // only for query in file, head file handle + int dfd; // only for query in file, data file handle + int lfd; // only for query in file, last file handle + SCompBlock *pBlock; // only for query in file + SField ** pFields; + int numOfBlocks; // only for query in file + int blockBufferSize; // length of pBlock buffer + int currentSlot; + int firstSlot; + int slot; + int pos; + TSKEY key; + int compBlockLen; // only for import + int64_t blockId; + TSKEY skey; + TSKEY ekey; + int64_t nAggTimeInterval; + char intervalTimeUnit; // interval data type, used for daytime revise + + int16_t numOfOutputCols; + int16_t interpoType; + int16_t checkBufferInLoop; // check if the buffer is full during scan each block + + SLimitVal limit; + int32_t rowSize; + int32_t dataRowSize; // row size of each loaded data from disk, the value is + // used for prepare buffer + SSqlGroupbyExpr * pGroupbyExpr; + SSqlFunctionExpr *pSelectExpr; + + SColumnFilter *colList; + + int32_t numOfFilterCols; + SColumnFilterInfo *pFilterInfo; + + int64_t *defaultVal; + + TSKEY lastKey; + // buffer info + int64_t pointsRead; // the number of points returned + int64_t pointsToRead; // maximum number of points to read + int64_t pointsOffset; // the number of points offset to save read data + SData **sdata; + SData * tsData; // timestamp column/primary key column +} SQuery; + +typedef struct { + char spi; + char encrypt; + char secret[TSDB_KEY_LEN]; + char cipheringKey[TSDB_KEY_LEN]; +} SConnSec; + +typedef struct { + char * buffer; + char * offset; + int trans; + int bufferSize; + pthread_mutex_t qmutex; +} STranQueue; + +// internal globals +extern int tsMeterSizeOnFile; +extern uint32_t tsRebootTime; +extern void * rpcQhandle; +extern void * dmQhandle; +extern void * queryQhandle; +extern int tsMaxVnode; +extern int tsOpenVnodes; +extern SVnodeObj *vnodeList; +extern void * vnodeTmrCtrl; + +// read API +extern int (*vnodeSearchKeyFunc[])(char *pValue, int num, TSKEY key, int order); + +void *vnodeQueryInTimeRange(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *sqlExprs, + SQueryMeterMsg *pQueryMsg, int *code); + +void *vnodeQueryOnMultiMeters(SMeterObj **pMeterObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, + 
SQueryMeterMsg *pQueryMsg, int *code); + +// assistant/tool functions +SSqlGroupbyExpr *vnodeCreateGroupbyExpr(SQueryMeterMsg *pQuery, int32_t *code); + +SSqlFunctionExpr *vnodeCreateSqlFunctionExpr(SQueryMeterMsg *pQuery, int32_t *code); +bool vnodeValidateExprColumnInfo(SQueryMeterMsg* pQueryMsg, SSqlFuncExprMsg* pExprMsg); + +bool vnodeIsValidVnodeCfg(SVnodeCfg *pCfg); + +int32_t vnodeGetResultSize(void *handle, int32_t *numOfRows); + +int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows); + +int64_t vnodeGetOffsetVal(void *thandle); + +bool vnodeHasRemainResults(void *handle); + +int vnodeRetrieveQueryResult(void *handle, int *pNum, char *argv[]); + +int vnodeSaveQueryResult(void *handle, char *data); + +int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t *timePrec); + +void vnodeFreeQInfo(void *, bool); + +void vnodeFreeQInfoInQueue(void *param); + +bool vnodeIsQInfoValid(void *param); + +int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQuery); + +void vnodeQueryData(SSchedMsg *pMsg); + +// meter API +int vnodeOpenMetersVnode(int vnode); + +void vnodeCloseMetersVnode(int vnode); + +int vnodeCreateMeterObj(SMeterObj *pNew, SConnSec *pSec); + +int vnodeRemoveMeterObj(int vnode, int sid); + +int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, void *, int sversion, int *numOfPoints); + +int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, void *, int sversion, int *numOfPoints); + +int vnodeInsertBufferedPoints(int vnode); + +int vnodeSaveAllMeterObjToFile(int vnode); + +int vnodeSaveMeterObjToFile(SMeterObj *pObj); + +int vnodeSaveVnodeCfg(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc); + +int vnodeSaveVnodeInfo(int vnode); + +// cache API +void *vnodeOpenCachePool(int vnode); + +void vnodeCloseCachePool(int vnode); + +void *vnodeAllocateCacheInfo(SMeterObj *pObj); + +void vnodeFreeCacheInfo(SMeterObj *pObj); + +void vnodeSetCommitQuery(SMeterObj *pObj, SQuery *pQuery); + +int vnodeInsertPointToCache(SMeterObj *pObj, char *pData); + +int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery); + +uint64_t vnodeGetPoolCount(SVnodeObj *pVnode); + +void vnodeUpdateCommitInfo(SMeterObj *pObj, int slot, int pos, uint64_t count); + +void vnodeCommitOver(SVnodeObj *pVnode); + +TSKEY vnodeGetFirstKey(int vnode); + +int vnodeSyncRetrieveCache(int vnode, int fd); + +int vnodeSyncRestoreCache(int vnode, int fd); + +pthread_t vnodeCreateCommitThread(SVnodeObj *pVnode); + +void vnodeCancelCommit(SVnodeObj *pVnode); + +void vnodeCloseStream(SVnodeObj *pVnode); + +void vnodeProcessCommitTimer(void *param, void *tmrId); + +void vnodeSearchPointInCache(SMeterObj *pObj, SQuery *pQuery); + +int vnodeAllocateCacheBlock(SMeterObj *pObj); + +int vnodeFreeCacheBlock(SCacheBlock *pCacheBlock); + +int vnodeIsCacheCommitted(SMeterObj *pObj); + +// file API +int vnodeInitFile(int vnode); + +int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery); + +void *vnodeCommitToFile(void *param); + +void *vnodeCommitMultiToFile(SVnodeObj *pVnode, int ssid, int esid); + +int vnodeSyncRetrieveFile(int vnode, int fd, uint32_t fileId, uint64_t *fmagic); + +int vnodeSyncRestoreFile(int vnode, int sfd); + +int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pBlock, SData *data[], SData *cdata[], int pointsRead); + +int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery); + +int vnodeReadCompBlockToMem(SMeterObj *pObj, SQuery *pQuery, SData *sdata[]); + +int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast); + +void 
vnodeCloseCommitFiles(SVnodeObj *pVnode); + +int vnodeReadLastBlockToMem(SMeterObj *pObj, SCompBlock *pBlock, SData *sdata[]); + +// vnode API +int vnodeInitPeer(int numOfThreads); + +void vnodeCleanUpPeer(); + +int vnodeOpenPeerVnode(int vnode); + +void vnodeClosePeerVnode(int vnode); + +void *vnodeGetMeterPeerConnection(SMeterObj *pObj, int index); + +int vnodeForwardToPeer(SMeterObj *pObj, char *msg, int msgLen, char action, int sversion); + +void vnodeCloseAllSyncFds(int vnode); + +void vnodeConfigVPeers(int vnode, int numOfPeers, SVPeerDesc peerDesc[]); + +void vnodeStartSyncProcess(SVnodeObj *pVnode); + +void vnodeCancelSync(int vnode); + +void vnodeListPeerStatus(char *buffer); + +void vnodeCheckOwnStatus(SVnodeObj *pVnode); + +int vnodeSaveMeterObjToFile(SMeterObj *pObj); + +int vnodeRecoverFromPeer(SVnodeObj *pVnode, int fileId); + +// vnodes API +int vnodeInitVnodes(); + +int vnodeInitStore(); + +void vnodeCleanUpVnodes(); + +void vnodeRemoveVnode(int vnode); + +int vnodeCreateVnode(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc); + +void vnodeCreateStream(SMeterObj *pObj); + +void vnodeRemoveStream(SMeterObj *pObj); + +// shell API +int vnodeInitShell(); + +void vnodeCleanUpShell(); + +int vnodeOpenShellVnode(int vnode); + +void vnodeCloseShellVnode(int vnode); + +// mgmt +int vnodeInitMgmt(); + +void vnodeCleanUpMgmt(); + +int vnodeRetrieveMissedCreateMsg(int vnode, int fd, uint64_t stime); + +int vnodeRestoreMissedCreateMsg(int vnode, int fd); + +int vnodeRetrieveMissedRemoveMsg(int vid, int fd, uint64_t stime); + +int vnodeRestoreMissedRemoveMsg(int vnode, int fd); + +int vnodeProcessBufferedCreateMsgs(int vnode); + +int vnodeSendVpeerCfgMsg(int vnode); + +int vnodeSendMeterCfgMsg(int vnode, int sid); + +int vnodeMgmtConns(); + +// commit +int vnodeInitCommit(int vnode); + +void vnodeCleanUpCommit(int vnode); + +int vnodeRenewCommitLog(int vnode); + +void vnodeRemoveCommitLog(int vnode); + +int vnodeWriteToCommitLog(SMeterObj *pObj, char action, char *cont, int contLen, int sversion); + +extern int (*vnodeProcessAction[])(SMeterObj *, char *, int, char, void *, int, int *); + +extern int (*pCompFunc[])(const char *const input, int inputSize, const int elements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize); + +extern int (*pDecompFunc[])(const char *const input, int compressedSize, const int elements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize); + +// global variable and APIs provided by mgmt +extern char mgmtStatus; +extern char mgmtDirectory[]; +extern const int16_t vnodeFileVersion; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODE_H diff --git a/src/system/inc/vnodeCache.h b/src/system/inc/vnodeCache.h new file mode 100644 index 000000000000..feb39e57035d --- /dev/null +++ b/src/system/inc/vnodeCache.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_VNODECACHE_H +#define TDENGINE_VNODECACHE_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + short notFree; + short numOfPoints; + int slot; + int index; + int64_t blockId; + struct _meter_obj *pMeterObj; + char * offset[]; +} SCacheBlock; + +typedef struct { + int64_t blocks; + int maxBlocks; + int numOfBlocks; + int unCommittedBlocks; + int32_t currentSlot; + int32_t commitSlot; // which slot is committed + int32_t commitPoint; // starting point for next commit + SCacheBlock **cacheBlocks; // cache block list, circular list +} SCacheInfo; + +typedef struct { + int vnode; + char ** pMem; + long freeSlot; + pthread_mutex_t vmutex; + uint64_t count; // kind of transcation ID + long notFreeSlots; + long threshold; + char commitInProcess; + int cacheBlockSize; + int cacheNumOfBlocks; +} SCachePool; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODECACHE_H diff --git a/src/system/inc/vnodeDataFilterFunc.h b/src/system/inc/vnodeDataFilterFunc.h new file mode 100644 index 000000000000..c2cd06597a37 --- /dev/null +++ b/src/system/inc/vnodeDataFilterFunc.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODEDATAFILTERFUNC_H +#define TDENGINE_VNODEDATAFILTERFUNC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "vnode.h" + +__filter_func_t *vnodeGetRangeFilterFuncArray(int32_t type); + +__filter_func_t *vnodeGetValueFilterFuncArray(int32_t type); + +bool vnodeSupportPrefilter(int32_t type); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODEDATAFILTERFUNC_H diff --git a/src/system/inc/vnodeFile.h b/src/system/inc/vnodeFile.h new file mode 100644 index 000000000000..c2fada82f8b0 --- /dev/null +++ b/src/system/inc/vnodeFile.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_VNODEFILE_H +#define TDENGINE_VNODEFILE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "tchecksum.h" + +#define TSDB_VNODE_DELIMITER 0xF00AFA0F + +typedef struct { int64_t compInfoOffset; } SCompHeader; + +typedef struct { + short colId; + short bytes; + int32_t numOfNullPoints; + int32_t type : 8; + int32_t offset : 24; + int32_t len; // data length + int64_t sum; + int64_t max; + int64_t min; + int64_t wsum; + char reserved[16]; +} SField; + +typedef struct { + int64_t last : 1; + int64_t offset : 63; + int32_t algorithm : 8; // compression algorithm can be changed + int32_t numOfPoints : 24; // how many points have been written into this block + int32_t sversion; + int32_t len; // total length of this data block + uint16_t numOfCols; + char reserved[16]; + TSKEY keyFirst; // time stamp for the first point + TSKEY keyLast; // time stamp for the last point +} SCompBlock; + +typedef struct { + SCompBlock *compBlock; + SField * fields; +} SCompBlockFields; + +typedef struct { + uint64_t uid; + int64_t last : 1; + int64_t numOfBlocks : 62; + uint32_t delimiter; // delimiter for recovery + TSCKSUM checksum; + SCompBlock compBlocks[]; // comp block list +} SCompInfo; + +typedef struct { + long tempHeadOffset; + long compInfoOffset; + long oldCompBlockOffset; + + long oldNumOfBlocks; + long newNumOfBlocks; + long finalNumOfBlocks; + + long oldCompBlockLen; + long newCompBlockLen; + long finalCompBlockLen; + + long committedPoints; + int commitSlot; + int32_t last : 1; + int32_t changed : 1; + int32_t commitPos : 30; + int64_t commitCount; + SCompBlock lastBlock; +} SMeterInfo; + +typedef struct { int64_t totalStorage; } SVnodeHeadInfo; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODEFILE_H diff --git a/src/system/inc/vnodeMgmt.h b/src/system/inc/vnodeMgmt.h new file mode 100644 index 000000000000..0fec3f2437a2 --- /dev/null +++ b/src/system/inc/vnodeMgmt.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODEMGMT_H +#define TDENGINE_VNODEMGMT_H + +#ifdef __cplusplus +extern "C" { +#endif + +int vnodeProcessCreateMeterRequest(char *pMsg); +int vnodeProcessRemoveMeterRequest(char *pMsg); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODEMGMT_H diff --git a/src/system/inc/vnodeQueryImpl.h b/src/system/inc/vnodeQueryImpl.h new file mode 100644 index 000000000000..61e7ff750b3f --- /dev/null +++ b/src/system/inc/vnodeQueryImpl.h @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODEQUERYUTIL_H +#define TDENGINE_VNODEQUERYUTIL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include "ihash.h" + +#define GET_QINFO_ADDR(x) ((char*)(x)-offsetof(SQInfo, query)) +#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0) + +#define DEFAULT_INTERN_BUF_SIZE 8192L +#define INIT_ALLOCATE_DISK_PAGES 60L +#define DEFAULT_DATA_FILE_MAPPING_PAGES 2L +#define DEFAULT_DATA_FILE_MMAP_WINDOW_SIZE (DEFAULT_DATA_FILE_MAPPING_PAGES * DEFAULT_INTERN_BUF_SIZE) + +#define IO_ENGINE_MMAP 0 +#define IO_ENGINE_SYNC 1 + +#define DEFAULT_IO_ENGINE IO_ENGINE_SYNC + +/** + * check if the primary column is loaded by default; otherwise, the program will + * be forced to load the primary column explicitly. + */ +#define PRIMARY_TSCOL_LOADED(query) ((query)->colList[0].data.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) + +typedef enum { + + /* + * the program will call this function again if this status is set. + * used to transfer from QUERY_RESBUF_FULL + */ + QUERY_NOT_COMPLETED = 0x1, + + /* + * the output buffer is full, so the next query will be employed; + * in this case, we need to set the appropriate start scan point for + * the next query. + * + * this status only exists in group-by clauses and + * diff/add/division/multiply queries. + */ + QUERY_RESBUF_FULL = 0x2, + + /* + * the query is over + * 1. this status is used in single-row result queries, e.g., + * count/sum/first/last/avg...etc. + * 2. when the query range on the timestamp is satisfied, it is also denoted as + * query_completed + */ + QUERY_COMPLETED = 0x4, + + /* + * all data has been scanned, so the current search is stopped; + * finally, the function will transfer this status to QUERY_COMPLETED + */ + QUERY_NO_DATA_TO_CHECK = 0x8, + +} vnodeQueryStatus; + +typedef struct SPointInterpoSupporter { + int32_t numOfCols; + char** pPrevPoint; + char** pNextPoint; +} SPointInterpoSupporter; + +typedef struct SBlockInfo { + TSKEY keyFirst; + TSKEY keyLast; + int32_t numOfCols; + int32_t size; +} SBlockInfo; + +typedef struct SMeterDataBlockInfoEx { + SCompBlockFields pBlock; + SMeterDataInfo* pMeterDataInfo; + int32_t blockIndex; + int32_t groupIdx; /* the number of groups is less than the total number of meters */ +} SMeterDataBlockInfoEx; + +typedef enum { + DISK_DATA_LOAD_FAILED = -0x1, + DISK_DATA_LOADED = 0x0, + DISK_DATA_DISCARDED = 0x01, +} vnodeDiskLoadStatus; + +#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN) +#define IS_SUPPLEMENT_SCAN(runtime) (!IS_MASTER_SCAN(runtime)) +#define SET_SUPPLEMENT_SCAN_FLAG(runtime) ((runtime)->scanFlag = SUPPLEMENTARY_SCAN) +#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN) + +typedef int (*__block_search_fn_t)(char* data, int num, int64_t key, int order); +typedef int32_t (*__read_data_fn_t)(int fd, SQInfo* pQInfo, SQueryFileInfo* pQueryFile, char* buf, uint64_t offset, + int32_t size); + +static FORCE_INLINE SMeterObj* getMeterObj(void* hashHandle, int32_t sid) { + return *(SMeterObj**)taosGetIntHashData(hashHandle, sid); +} + +bool isQueryKilled(SQuery* pQuery); +bool isFixedOutputQuery(SQuery* pQuery); +bool isPointInterpoQuery(SQuery* pQuery); +bool isTopBottomQuery(SQuery* pQuery); +bool isFirstLastRowQuery(SQuery* pQuery); + +bool needSupplementaryScan(SQuery* pQuery); +bool onDemandLoadDatablock(SQuery* pQuery, int16_t queryRangeSet); + +void setQueryStatus(SQuery* pQuery, int8_t status); + +bool 
doRevisedResultsByLimit(SQInfo* pQInfo); +void truncateResultByLimit(SQInfo* pQInfo, int64_t* final, int32_t* interpo); + +void initCtxOutputBuf(SQueryRuntimeEnv* pRuntimeEnv); +void cleanCtxOutputBuf(SQueryRuntimeEnv* pRuntimeEnv); +void resetCtxOutputBuf(SQueryRuntimeEnv* pRuntimeEnv); +void forwardCtxOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, int64_t output); + +bool needPrimaryTimestampCol(SQuery* pQuery, SBlockInfo* pBlockInfo); +void vnodeScanAllData(SQueryRuntimeEnv* pRuntimeEnv); + +int32_t vnodeQueryResultInterpolate(SQInfo* pQInfo, tFilePage** pDst, tFilePage** pDataSrc, int32_t numOfRows, + int32_t* numOfInterpo); +void copyResToQueryResultBuf(SMeterQuerySupportObj* pSupporter, SQuery* pQuery); +void moveDescOrderResultsToFront(SQueryRuntimeEnv* pRuntimeEnv); + +void doSkipResults(SQueryRuntimeEnv* pRuntimeEnv); +void doFinalizeResult(SQueryRuntimeEnv* pRuntimeEnv); +int64_t getNumOfResult(SQueryRuntimeEnv* pRuntimeEnv); + +void forwardIntervalQueryRange(SMeterQuerySupportObj* pSupporter, SQueryRuntimeEnv* pRuntimeEnv); +void forwardQueryStartPosition(SQueryRuntimeEnv* pRuntimeEnv); + +bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySupportObj* pSupporter, + SPointInterpoSupporter* pPointInterpSupporter); + +void pointInterpSupporterInit(SQuery* pQuery, SPointInterpoSupporter* pInterpoSupport); +void pointInterpSupporterDestroy(SPointInterpoSupporter* pPointInterpSupport); +void pointInterpSupporterSetData(SQInfo* pQInfo, SPointInterpoSupporter* pPointInterpSupport); + +int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv* pRuntimeEnv, SPositionInfo* position); +void doCloseAllOpenedResults(SMeterQuerySupportObj* pSupporter); +void disableFunctForSuppleScanAndSetSortOrder(SQueryRuntimeEnv* pRuntimeEnv, int32_t order); +void enableFunctForMasterScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order); + +int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj* pSupporter); +void copyFromGroupBuf(SQInfo* pQInfo, SOutputRes* result); + +SBlockInfo getBlockBasicInfo(void* pBlock, int32_t blockType); +SCacheBlock* getCacheDataBlock(SMeterObj* pMeterObj, SQuery* pQuery, int32_t slot); + +void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32_t blockStatus, char* data, + SBlockInfo* pBlockBasicInfo, SMeterDataInfo* pDataHeadInfoEx, SField* pFields, + __block_search_fn_t searchFn); + +SMeterDataInfo** vnodeFilterQualifiedMeters(SQInfo* pQInfo, int32_t vid, SQueryFileInfo* pQueryFileInfo, + tSidSet* pSidSet, SMeterDataInfo* pMeterDataInfo, int32_t* numOfMeters); +int32_t vnodeGetVnodeHeaderFileIdx(int32_t* fid, SQueryRuntimeEnv* pRuntimeEnv, int32_t order); + +int32_t createDataBlocksInfoEx(SMeterDataInfo** pMeterDataInfo, int32_t numOfMeters, + SMeterDataBlockInfoEx** pDataBlockInfoEx, int32_t numOfCompBlocks, + int32_t* nAllocBlocksInfoSize, int64_t addr); +void freeMeterBlockInfoEx(SMeterDataBlockInfoEx* pDataBlockInfoEx, int32_t len); + +void setExecutionContext(SMeterQuerySupportObj* pSupporter, SOutputRes* outputRes, int32_t meterIdx, int32_t groupIdx); +void setIntervalQueryExecutionContext(SMeterQuerySupportObj* pSupporter, int32_t meterIdx, SMeterQueryInfo* sqinfo); + +int64_t getQueryStartPositionInCache(SQueryRuntimeEnv* pRuntimeEnv, int32_t* slot, int32_t* pos, bool ignoreQueryRange); +int64_t getNextAccessedKeyInData(SQuery* pQuery, int64_t* pPrimaryCol, SBlockInfo* pBlockInfo, int32_t blockStatus); + +void setIntervalQueryRange(SMeterQuerySupportObj* pSupporter, int64_t key, SMeterDataInfo* pInfoEx); +void 
saveIntervalQueryRange(SQuery* pQuery, SMeterQueryInfo* pInfo); +void restoreIntervalQueryRange(SQuery* pQuery, SMeterQueryInfo* pInfo); + +uint32_t getDataBlocksForMeters(SMeterQuerySupportObj* pSupporter, SQuery* pQuery, char* pHeaderData, + int32_t numOfMeters, SQueryFileInfo* pQueryFileInfo, SMeterDataInfo** pMeterDataInfo); +int32_t LoadDatablockOnDemand(SCompBlock* pBlock, SField** pFields, int8_t* blkStatus, SQueryRuntimeEnv* pRuntimeEnv, + int32_t fileIdx, int32_t slotIdx, __block_search_fn_t searchFn, bool onDemand); + +void setMeterQueryInfo(SMeterQuerySupportObj* pSupporter, SMeterDataInfo* pMeterDataInfo); +void setMeterDataInfo(SMeterDataInfo* pMeterDataInfo, SMeterObj* pMeterObj, int32_t meterIdx, int32_t groupId); + +void vnodeSetTagValueInParam(tSidSet* pSidSet, SQueryRuntimeEnv* pRuntimeEnv, SMeterSidExtInfo* pMeterInfo); + +void vnodeCheckIfDataExists(SQueryRuntimeEnv* pRuntimeEnv, SMeterObj* pMeterObj, bool* dataInDisk, bool* dataInCache); + +void displayInterResult(SData** pdata, SQuery* pQuery, int32_t numOfRows); + +void vnodePrintQueryStatistics(SMeterQuerySupportObj* pSupporter); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODEQUERYUTIL_H diff --git a/src/system/inc/vnodeRead.h b/src/system/inc/vnodeRead.h new file mode 100644 index 000000000000..31b451280070 --- /dev/null +++ b/src/system/inc/vnodeRead.h @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_VNODEREAD_H +#define TDENGINE_VNODEREAD_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include "tinterpolation.h" +#include "vnodeTagMgmt.h" + +/* + * use to keep the first point position, consisting of position in blk and block + * id, file id + */ +typedef struct { + int32_t pos; + int32_t slot; + int32_t fileId; +} SPositionInfo; + +typedef struct SQueryLoadBlockInfo { + int32_t fileListIndex; /* index of this file in files list of this vnode */ + int32_t fileId; + int32_t slotIdx; + int32_t sid; +} SQueryLoadBlockInfo; + +typedef struct SQueryLoadCompBlockInfo { + int32_t sid; /* meter sid */ + int32_t fileId; + int32_t fileListIndex; +} SQueryLoadCompBlockInfo; +/* + * the header file info for one vnode + */ +typedef struct SQueryFileInfo { + int32_t fileID; /* file id */ + char headerFilePath[256]; /* full file name */ + char dataFilePath[256]; + char lastFilePath[256]; + int32_t defaultMappingSize; /* default mapping size */ + + int32_t headerFd; /* file handler */ + char* pHeaderFileData; /* mmap header files */ + size_t headFileSize; + + int32_t dataFd; + char* pDataFileData; + size_t dataFileSize; + uint64_t dtFileMappingOffset; + + int32_t lastFd; + char* pLastFileData; + size_t lastFileSize; + uint64_t lastFileMappingOffset; + +} SQueryFileInfo; + +typedef struct SQueryCostStatistics { + double cacheTimeUs; + double fileTimeUs; + + int64_t numOfFiles; // opened files during query + int64_t numOfTables; // num of queries tables + int64_t numOfSeek; // number of seek operation + + int64_t readDiskBlocks; // accessed disk block + int64_t skippedFileBlocks; // skipped blocks + int64_t blocksInCache; // accessed cache blocks + + int64_t readField; // field size + int64_t totalFieldSize; // total read fields size + double loadFieldUs; // total elapsed time to read fields info + + int64_t totalBlockSize; // read data blocks + double loadBlocksUs; // total elapsed time to read data blocks + + int64_t totalGenData; // in-memory generated data + + int64_t readCompInfo; // read compblock info + int64_t totalCompInfoSize; // total comp block size + double loadCompInfoUs; // total elapsed time to read comp block info + + int64_t tmpBufferInDisk; // size of buffer for intermeidate result +} SQueryCostStatistics; + +typedef struct RuntimeEnvironment { + SPositionInfo startPos; /* the start position, used for secondary/third iteration */ + SPositionInfo endPos; /* the last access position in query, served as the + start pos of reversed order query */ + SPositionInfo nextPos; /* start position of the next scan */ + + SData* colDataBuffer[TSDB_MAX_COLUMNS]; + + /* + * for data that requires second/third scan of all data, to denote the column + * need to perform operation refactor to SQLFunctionCtx + */ + bool* go; + + // Indicate if data block is loaded, the block is first/last/internal block + int8_t blockStatus; + int32_t internalBufSize; + + SData* primaryColBuffer; + char* unzipBuffer; + char* secondaryUnzipBuffer; + + SQuery* pQuery; + SMeterObj* pMeterObj; + SQLFunctionCtx* pCtx; + + char* buffer; /* column data load buffer, colDataBuffer is point to this value + */ + SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */ + SQueryLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */ + + /* + * header files info, avoid to iterate the directory, the data is acquired + * during + * in query preparation function + */ + SQueryFileInfo* pHeaderFiles; + uint32_t numOfFiles; /* number of files of 
one vnode during query execution */ + + int16_t numOfRowsPerPage; + int16_t offset[TSDB_MAX_COLUMNS]; + + int16_t scanFlag; /* denotes reversed scan of data or not */ + SInterpolationInfo interpoInfo; + SData** pInterpoBuf; + + SQueryCostStatistics summary; +} SQueryRuntimeEnv; + +typedef struct SOutputRes { + uint16_t numOfRows; + int32_t nAlloc; + tFilePage** result; +} SOutputRes; + +/* intermediate result during multimeter query involves interval */ +typedef struct SMeterQueryInfo { + int64_t lastKey; + int64_t skey; + int64_t ekey; + int32_t numOfRes; + uint32_t numOfPages; + uint32_t numOfAlloc; + + int32_t reverseIndex; // reversed output indicator, start from (numOfRes-1) + int16_t reverseFillRes; // denote if reverse fill the results in + // supplementary scan required or not + int16_t queryRangeSet; // denote if the query range is set, only available + // for interval query + int16_t lastResRows; // + + uint32_t* pageList; +} SMeterQueryInfo; + +typedef struct SMeterDataInfo { + uint64_t offsetInHeaderFile; + int32_t numOfBlocks; + int32_t start; // start block index + SCompBlock** pBlock; + int32_t meterOrderIdx; + SMeterObj* pMeterObj; + int32_t groupIdx; // group id in meter list + + SMeterQueryInfo* pMeterQInfo; +} SMeterDataInfo; + +typedef struct SMeterQuerySupportObj { + void* pMeterObj; + + SMeterSidExtInfo** pMeterSidExtInfo; + int32_t numOfMeters; + + /* + * multimeter query resultset. + * In multimeter queries, the result is temporarily stored on this structure, instead of + * directly put result into output buffer, since we have no idea how many number of + * rows may be generated by a specific subgroup. When query on all subgroups is executed, + * the result is copy to output buffer. This attribution is not used during single meter query processing. + */ + SOutputRes* pResult; + SQueryRuntimeEnv runtimeEnv; + + int64_t rawSKey; + int64_t rawEKey; + + int32_t subgroupIdx; + int32_t offset; /* offset in group result set of subgroup */ + + tSidSet* pSidSet; + + /* + * the query is executed position on which meter of the whole list. + * when the index reaches the last one of the list, it means the query is completed. + * We later may refactor to remove this attribution by using another flag to denote + * whether a multimeter query is completed or not. 
+ */ + int32_t meterIdx; + + int32_t meterOutputFd; + int32_t lastPageId; + int32_t numOfPages; + int32_t numOfGroupResultPages; + int32_t groupResultSize; + + char* meterOutputMMapBuf; + int64_t bufSize; + char extBufFile[256]; // external file name + + SMeterDataInfo* pMeterDataInfo; + +} SMeterQuerySupportObj; + +typedef struct _qinfo { + uint64_t signature; + + char user[TSDB_METER_ID_LEN + 1]; + char sql[TSDB_SHOW_SQL_LEN]; + uint8_t stream; + uint16_t port; + uint32_t ip; + uint64_t startTime; + int64_t useconds; + int killed; + struct _qinfo *prev, *next; + + SQuery query; + int num; + int totalPoints; + int pointsRead; + int pointsReturned; + int pointsInterpo; + int code; + + char bufIndex; + char changed; + char over; + SMeterObj* pObj; + + int (*fp)(SMeterObj*, SQuery*); + + sem_t dataReady; + SMeterQuerySupportObj* pMeterQuerySupporter; + +} SQInfo; + +int32_t vnodeQuerySingleMeterPrepare(SQInfo* pQInfo, SMeterObj* pMeterObj, SMeterQuerySupportObj* pSMultiMeterObj); + +void vnodeQueryFreeQInfoEx(SQInfo* pQInfo); + +bool vnodeParametersSafetyCheck(SQuery* pQuery); + +int32_t vnodeMultiMeterQueryPrepare(SQInfo* pQInfo, SQuery* pQuery); + +/** + * decrease the numofQuery of each table that is queried, enable the + * remove/close operation can be executed + * @param pQInfo + */ +void vnodeDecMeterRefcnt(SQInfo* pQInfo); + +/* sql query handle in dnode */ +void vnodeSingleMeterQuery(SSchedMsg* pMsg); + +/* + * handle multi-meter query process + */ +void vnodeMultiMeterQuery(SSchedMsg* pMsg); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODEREAD_H diff --git a/src/system/inc/vnodeShell.h b/src/system/inc/vnodeShell.h new file mode 100644 index 000000000000..d0194a1765fc --- /dev/null +++ b/src/system/inc/vnodeShell.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODESHELL_H +#define TDENGINE_VNODESHELL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +typedef struct { + int sid; + int vnode; + uint32_t ip; + short port; + int count; // track the number of imports + int code; // track the code of imports + int numOfTotalPoints; // track the total number of points imported + void * thandle; // handle from TAOS layer + void * qhandle; +} SShellObj; + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODESHELL_H diff --git a/src/system/inc/vnodeStore.h b/src/system/inc/vnodeStore.h new file mode 100644 index 000000000000..e38abad347e2 --- /dev/null +++ b/src/system/inc/vnodeStore.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
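The SQInfo handle defined above carries intrusive prev/next pointers so that active query handles can be chained into a list without extra allocations; the actual list management lives in the .c files, which are not part of this patch. Purely as a minimal, hypothetical sketch of how such an intrusive list is usually maintained (SQInfoDemo, qinfoListInsert and qinfoListRemove are illustrative names, not identifiers from the source):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: a trimmed stand-in for SQInfo with just the link fields. */
    typedef struct _qinfo_demo {
      uint64_t signature;
      struct _qinfo_demo *prev, *next;
    } SQInfoDemo;

    typedef struct { SQInfoDemo *head; } SQInfoList;

    /* Push a handle at the head of the list (hypothetical helper). */
    static void qinfoListInsert(SQInfoList *list, SQInfoDemo *q) {
      q->prev = NULL;
      q->next = list->head;
      if (list->head) list->head->prev = q;
      list->head = q;
    }

    /* Unlink a handle from wherever it sits in the list (hypothetical helper). */
    static void qinfoListRemove(SQInfoList *list, SQInfoDemo *q) {
      if (q->prev) q->prev->next = q->next;
      if (q->next) q->next->prev = q->prev;
      if (list->head == q) list->head = q->next;
      q->prev = q->next = NULL;
    }

The same doubly-linked pattern appears again later in this patch, for example in the account bookkeeping of mgmtAcct.c.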
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODESTORE_H +#define TDENGINE_VNODESTORE_H + +#ifdef __cplusplus +extern "C" { +#endif + +void vnodeProcessDataFromVnode(SIntMsg *msg, void *tcpHandle); +void vnodeCalcOpenVnodes(); + +#ifdef __cplusplus +} +#endif + +#endif // TDEGINE_VNODESTORE_H diff --git a/src/system/inc/vnodeSystem.h b/src/system/inc/vnodeSystem.h new file mode 100644 index 000000000000..10745e7a72ed --- /dev/null +++ b/src/system/inc/vnodeSystem.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODESYSTEM_H +#define TDENGINE_VNODESYSTEM_H + +#ifdef __cplusplus +extern "C" { +#endif + +int vnodeInitSystem(); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODESYSTEM_H diff --git a/src/system/inc/vnodeTagMgmt.h b/src/system/inc/vnodeTagMgmt.h new file mode 100644 index 000000000000..898a2d394456 --- /dev/null +++ b/src/system/inc/vnodeTagMgmt.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODETAGMGMT_H +#define TDENGINE_VNODETAGMGMT_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * @version 0.1 + + * @date 2018/01/02 + * @author liaohj + * management of the tag value of tables + * in query, client need the vnode to aggregate results according to tags + * values, + * the grouping operation is done here. + * Note: + * 1. we implement a quick sort algorithm, may remove it later. 
+ */ + +typedef struct tTagSchema { + struct SSchema *pSchema; + int32_t numOfCols; + int32_t colOffset[]; +} tTagSchema; + +typedef struct tSidSet { + int32_t numOfSids; + int32_t numOfSubSet; + SMeterSidExtInfo **pSids; + int32_t * starterPos; // position of each subgroup, generated according to + + tTagSchema *pTagSchema; + tOrderIdx orderIdx; +} tSidSet; + +typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, void *param); + +tSidSet *tSidSetCreate(struct SMeterSidExtInfo **pMeterSidExtInfo, int32_t numOfMeters, SSchema *pSchema, + int32_t numOfTags, int16_t *orderList, int32_t numOfOrderCols); + +tTagSchema *tCreateTagSchema(SSchema *pSchema, int32_t numOfTagCols); + +int32_t *calculateSubGroup(void **pSids, int32_t numOfMeters, int32_t *numOfSubset, tOrderDescriptor *pOrderDesc, + __ext_compar_fn_t compareFn); + +void tSidSetDestroy(tSidSet **pSets); + +void tSidSetSort(tSidSet *pSets); + +int32_t meterSidComparator(const void *s1, const void *s2, void *param); + +int32_t doCompare(char *f1, char *f2, int32_t type, int32_t size); + +void tQSortEx(void **pMeterSids, size_t size, int32_t start, int32_t end, void *param, __ext_compar_fn_t compareFn); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODETAGMGMT_H diff --git a/src/system/inc/vnodeUtil.h b/src/system/inc/vnodeUtil.h new file mode 100644 index 000000000000..ca84f233db30 --- /dev/null +++ b/src/system/inc/vnodeUtil.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
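calculateSubGroup() above returns the start position of each subgroup once the meter SIDs have been ordered by their tag values; the real routine works on SMeterSidExtInfo entries and a tOrderDescriptor, neither of which is reproduced here. Only as a rough illustration of the idea, and under the assumption that the input is already sorted, the sketch below splits a plain array into subgroups with a user-supplied comparator; all names in it are hypothetical:

    #include <stdlib.h>

    /* Comparator returning 0 when two elements belong to the same group. */
    typedef int (*group_cmp_fn)(const void *a, const void *b);

    /* Return a malloc'ed array holding the start index of every subgroup of a
     * sorted array; *numOfSubset receives the subgroup count. Mirrors the shape
     * of calculateSubGroup(), but is not the real code. */
    static int *splitSubGroups(void **items, int num, int *numOfSubset, group_cmp_fn cmp) {
      int *starts = malloc(sizeof(int) * (num + 1));
      if (starts == NULL || num == 0) { *numOfSubset = 0; return starts; }

      int groups = 0;
      starts[groups++] = 0;                     /* first element opens the first group */
      for (int i = 1; i < num; ++i) {
        if (cmp(items[i - 1], items[i]) != 0) { /* tag value changed: a new group starts */
          starts[groups++] = i;
        }
      }
      *numOfSubset = groups;
      return starts;
    }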
+ */ + +#ifndef TDENGINE_VNODE_UTIL_H +#define TDENGINE_VNODE_UTIL_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* get the qinfo struct address from the query struct address */ +#define GET_COLUMN_BYTES(query, colidx) \ + ((query)->colList[(query)->pSelectExpr[colidx].pBase.colInfo.colIdxInBuf].data.bytes) +#define GET_COLUMN_TYPE(query, colidx) \ + ((query)->colList[(query)->pSelectExpr[colidx].pBase.colInfo.colIdxInBuf].data.type) + +#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP) +#define EXTRA_BYTES 2 // for possible compression deflation + +int vnodeGetEid(int days); + +int vnodeCheckFileIntegrity(FILE *fp); + +void vnodeCreateFileHeader(FILE *fp); + +void vnodeCreateFileHeaderFd(int fd); + +void vnodeGetHeadFileHeaderInfo(int fd, SVnodeHeadInfo *pHeadInfo); + +void vnodeUpdateHeadFileHeader(int fd, SVnodeHeadInfo *pHeadInfo); + +/** + * check if two schema is identical or not + * This function does not check if a schema is valid or not + * + * @param pSSchemaFirst + * @param numOfCols1 + * @param pSSchemaSecond + * @param numOfCols2 + * @return + */ +bool vnodeMeterSchemaIdentical(SColumn *pSchema1, int32_t numOfCols1, SColumn *pSchema2, int32_t numOfCols2); + +/** + * free SFields in SQuery + * vnodeFreeFields must be called before free(pQuery->pBlock); + * @param pQuery + */ +void vnodeFreeFields(SQuery *pQuery); + +void vnodeUpdateFilterColumnIndex(SQuery* pQuery); +void vnodeUpdateQueryColumnIndex(SQuery* pQuery, SMeterObj* pMeterObj); + +int32_t vnodeCreateFilterInfo(SQuery *pQuery); + +bool vnodeFilterData(SQuery* pQuery, int32_t* numOfActualRead, int32_t index); +bool vnodeDoFilterData(SQuery* pQuery, int32_t elemPos); + +bool vnodeIsProjectionQuery(SSqlFunctionExpr *pExpr, int32_t numOfOutput); + +int32_t vnodeIncQueryRefCount(SQueryMeterMsg *pQueryMsg, SMeterSidExtInfo **pSids, SMeterObj **pMeterObjList, + int32_t *numOfInc); + +void vnodeDecQueryRefCount(SQueryMeterMsg *pQueryMsg, SMeterObj **pMeterObjList, int32_t numOfInc); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODE_UTIL_H diff --git a/src/system/src/dnodeMgmt.c b/src/system/src/dnodeMgmt.c new file mode 100644 index 000000000000..0c6822159c07 --- /dev/null +++ b/src/system/src/dnodeMgmt.c @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
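vnodeMeterSchemaIdentical() above only promises to report whether two column lists describe the same layout; its body lives in vnodeUtil.c and is not part of this header. Purely as an illustration of the kind of field-by-field check the declaration implies, and assuming an SColumn carries at least a column id, a type and a byte width (as suggested by the schema handling elsewhere in this patch), a sketch might look like the following; SColumnDemo and schemaIdentical are illustrative names only:

    #include <stdbool.h>
    #include <stdint.h>

    /* Trimmed stand-in for SColumn, for illustration only. */
    typedef struct { int16_t colId; int8_t type; int16_t bytes; } SColumnDemo;

    /* Hypothetical sketch: two schemas are identical when they have the same
     * number of columns and every column matches in id, type and width. */
    static bool schemaIdentical(const SColumnDemo *s1, int32_t n1,
                                const SColumnDemo *s2, int32_t n2) {
      if (n1 != n2) return false;
      for (int32_t i = 0; i < n1; ++i) {
        if (s1[i].colId != s2[i].colId || s1[i].type != s2[i].type ||
            s1[i].bytes != s2[i].bytes) {
          return false;
        }
      }
      return true;
    }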
+ */ + +#include +#include +#include + +#include "dnodeSystem.h" +#include "taosmsg.h" +#include "trpc.h" +#include "tsched.h" +#include "tsystem.h" +#include "vnode.h" +#include "vnodeMgmt.h" +#include "vnodeSystem.h" +#include "vnodeUtil.h" + +int vnodeProcessVPeersMsg(char *msg); +int vnodeProcessCreateMeterMsg(char *pMsg); +int vnodeProcessFreeVnodeRequest(char *pMsg); +int vnodeProcessVPeerCfgRsp(char *msg); +int vnodeProcessMeterCfgRsp(char *msg); +int vnodeProcessAlterStreamRequest(char *pMsg); + +void mgmtProcessMsgFromVnode(SSchedMsg *sched); +void *mgmtQhandle; + +int vnodeSendMsgToMgmt(char *msg) { + SSchedMsg schedMsg; + schedMsg.fp = mgmtProcessMsgFromVnode; + schedMsg.msg = msg; + schedMsg.ahandle = NULL; + schedMsg.thandle = NULL; + taosScheduleTask(mgmtQhandle, &schedMsg); + + return 0; +} + +void vnodeProcessMsgFromMgmt(SSchedMsg *sched) { + char msgType = *sched->msg; + char *content = sched->msg + 1; + + dTrace("msg:%s is received from mgmt", taosMsg[msgType]); + + if (msgType == TSDB_MSG_TYPE_CREATE) { + vnodeProcessCreateMeterRequest(content); + } else if (msgType == TSDB_MSG_TYPE_VPEERS) { + vnodeProcessVPeersMsg(content); + } else if (msgType == TSDB_MSG_TYPE_VPEER_CFG_RSP) { + vnodeProcessVPeerCfgRsp(content); + } else if (msgType == TSDB_MSG_TYPE_METER_CFG_RSP) { + vnodeProcessMeterCfgRsp(content); + } else if (msgType == TSDB_MSG_TYPE_REMOVE) { + vnodeProcessRemoveMeterRequest(content); + } else if (msgType == TSDB_MSG_TYPE_FREE_VNODE) { + vnodeProcessFreeVnodeRequest(content); + } else if (msgType == TSDB_MSG_TYPE_ALTER_STREAM) { + vnodeProcessAlterStreamRequest(content); + } else { + dError("%s is not processed", taosMsg[msgType]); + } + + free(sched->msg); +} + +int vnodeProcessMeterCfgRsp(char *pMsg) { + int code = *pMsg; + + if (code == 0) { + vnodeProcessCreateMeterMsg(pMsg + 1); + } else { + STaosRsp *pRsp; + pRsp = (STaosRsp *)pMsg; + int32_t *pint = (int32_t *)pRsp->more; + int vnode = htonl(*pint); + int sid = htonl(*(pint + 1)); + dError("vid:%d, sid:%d, meter is not configured, remove it", vnode, sid); + int ret = vnodeRemoveMeterObj(vnode, sid); + dTrace("vid:%d, sid:%d, meter delete ret:%d", vnode, sid, ret); + } + + return 0; +} + +int vnodeProcessCreateMeterRequest(char *pMsg) { + SCreateMsg *pCreate; + int code = 0; + int vid; + SVnodeObj * pVnode; + char * pStart; + + pCreate = (SCreateMsg *)pMsg; + vid = htons(pCreate->vnode); + + if (vid >= TSDB_MAX_VNODES || vid < 0) { + dError("vid:%d, vnode is out of range", vid); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _over; + } + + pVnode = vnodeList + vid; + if (pVnode->cfg.maxSessions <= 0) { + dError("vid:%d, not activated", vid); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _over; + } + + code = vnodeProcessCreateMeterMsg(pMsg); + +_over: + + pStart = (char *)malloc(128); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_CREATE_RSP; + pMsg = pStart + 1; + + *pMsg = code; + vnodeSendMsgToMgmt(pStart); + + return code; +} + +int vnodeProcessAlterStreamRequest(char *pMsg) { + SAlterStreamMsg *pAlter; + int code = 0; + int vid, sid; + SVnodeObj * pVnode; + char * pStart; + + pAlter = (SAlterStreamMsg *)pMsg; + vid = htons(pAlter->vnode); + sid = htonl(pAlter->sid); + + if (vid >= TSDB_MAX_VNODES || vid < 0) { + dError("vid:%d, vnode is out of range", vid); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _over; + } + + pVnode = vnodeList + vid; + if (pVnode->cfg.maxSessions <= 0 || pVnode->pCachePool == NULL) { + dError("vid:%d is not activated yet", pAlter->vnode); + code = 
TSDB_CODE_INVALID_SESSION_ID; + goto _over; + } + + if (pAlter->sid >= pVnode->cfg.maxSessions || pAlter->sid < 0) { + dError("vid:%d sid:%d uid:%ld, sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _over; + } + + SMeterObj *pMeterObj = vnodeList[vid].meterList[sid]; + if (pMeterObj == NULL || sid != pMeterObj->sid || vid != pMeterObj->vnode) { + dError("vid:%d sid:%d, no active session", vid, sid); + code = TSDB_CODE_NOT_ACTIVE_SESSION; + goto _over; + } + + pMeterObj->status = pAlter->status; + if (pMeterObj->status == 1) { + if (pAlter->stime > pMeterObj->lastKey) // starting time can be specified + pMeterObj->lastKey = pAlter->stime; + vnodeCreateStream(pMeterObj); + } else { + vnodeRemoveStream(pMeterObj); + } + + vnodeSaveMeterObjToFile(pMeterObj); + +_over: + pStart = (char *)malloc(128); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_ALTER_STREAM_RSP; + pMsg = pStart + 1; + + *pMsg = code; + vnodeSendMsgToMgmt(pStart); + + return code; +} + +int vnodeProcessCreateMeterMsg(char *pMsg) { + int code; + SMeterObj * pObj = NULL; + SConnSec connSec; + SCreateMsg *pCreate = (SCreateMsg *)pMsg; + + pCreate->vnode = htons(pCreate->vnode); + pCreate->sid = htonl(pCreate->sid); + pCreate->lastCreate = htobe64(pCreate->lastCreate); + pCreate->timeStamp = htobe64(pCreate->timeStamp); + + if (pCreate->vnode >= TSDB_MAX_VNODES || pCreate->vnode < 0) { + dError("vid:%d is out of range", pCreate->vnode); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _create_over; + } + + SVnodeObj *pVnode = vnodeList + pCreate->vnode; + if (pVnode->pCachePool == NULL) { + dError("vid:%d is not activated yet", pCreate->vnode); + vnodeSendVpeerCfgMsg(pCreate->vnode); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _create_over; + } + + if (pCreate->sid >= pVnode->cfg.maxSessions || pCreate->sid < 0) { + dError("vid:%d sid:%d id:%s, sid is out of range", pCreate->vnode, pCreate->sid, pCreate->meterId); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _create_over; + } + + pCreate->numOfColumns = htons(pCreate->numOfColumns); + if (pCreate->numOfColumns <= 0) { + dTrace("vid:%d sid:%d id:%s, numOfColumns is out of range", pCreate->vnode, pCreate->sid, pCreate->meterId); + code = TSDB_CODE_OTHERS; + goto _create_over; + } + + pCreate->sqlLen = htons(pCreate->sqlLen); + pObj = (SMeterObj *)calloc(1, sizeof(SMeterObj) + pCreate->sqlLen + 1); + if (pObj == NULL) { + dError("vid:%d sid:%d id:%s, no memory to allocate meterObj", pCreate->vnode, pCreate->sid, pCreate->meterId); + code = TSDB_CODE_NO_RESOURCE; + goto _create_over; + } + + /* + * memory alignment may cause holes in SColumn struct which are not assigned any value + * therefore, we could not use memcmp to compare whether two SColumns are equal or not. + * So, we need to set the memory to 0 when allocating memory. 
+ */ + pObj->schema = (SColumn *)calloc(1, pCreate->numOfColumns * sizeof(SColumn)); + + pObj->vnode = pCreate->vnode; + pObj->sid = pCreate->sid; + pObj->uid = pCreate->uid; + memcpy(pObj->meterId, pCreate->meterId, TSDB_METER_ID_LEN); + pObj->numOfColumns = pCreate->numOfColumns; + pObj->timeStamp = pCreate->timeStamp; + pObj->sversion = htonl(pCreate->sversion); + pObj->maxBytes = 0; + + for (int i = 0; i < pObj->numOfColumns; ++i) { + pObj->schema[i].type = pCreate->schema[i].type; + pObj->schema[i].bytes = htons(pCreate->schema[i].bytes); + pObj->schema[i].colId = htons(pCreate->schema[i].colId); + pObj->bytesPerPoint += pObj->schema[i].bytes; + if (pObj->maxBytes < pObj->schema[i].bytes) pObj->maxBytes = pObj->schema[i].bytes; + } + + if (pCreate->sqlLen > 0) { + pObj->sqlLen = pCreate->sqlLen; + pObj->pSql = ((char *)pObj) + sizeof(SMeterObj); + memcpy(pObj->pSql, (char *)pCreate->schema + pCreate->numOfColumns * sizeof(SMColumn), pCreate->sqlLen); + pObj->pSql[pCreate->sqlLen] = 0; + } + + pObj->pointsPerFileBlock = pVnode->cfg.rowsInFileBlock; + + if (sizeof(TSKEY) != pObj->schema[0].bytes) { + dError("key length is not matched, required key length:%d", sizeof(TSKEY)); + code = TSDB_CODE_OTHERS; + goto _create_over; + } + + // security info shall be saved here + connSec.spi = pCreate->spi; + connSec.encrypt = pCreate->encrypt; + memcpy(connSec.secret, pCreate->secret, TSDB_KEY_LEN); + memcpy(connSec.cipheringKey, pCreate->cipheringKey, TSDB_KEY_LEN); + + code = vnodeCreateMeterObj(pObj, &connSec); + +_create_over: + if (code != TSDB_CODE_SUCCESS) { + dTrace("vid:%d sid:%d id:%s, failed to create meterObj", pCreate->vnode, pCreate->sid, pCreate->meterId); + tfree(pObj); + } + + return code; +} + +int vnodeProcessRemoveMeterRequest(char *pMsg) { + SMeterObj * pObj; + SRemoveMeterMsg *pRemove; + int code = 0; + char * pStart; + + pRemove = (SRemoveMeterMsg *)pMsg; + pRemove->vnode = htons(pRemove->vnode); + pRemove->sid = htonl(pRemove->sid); + + if (pRemove->vnode < 0 || pRemove->vnode >= TSDB_MAX_VNODES) { + dWarn("vid:%d sid:%d, already removed", pRemove->vnode, pRemove->sid); + goto _remove_over; + } + + if (vnodeList[pRemove->vnode].meterList == NULL) goto _remove_over; + + pObj = vnodeList[pRemove->vnode].meterList[pRemove->sid]; + if (pObj == NULL) goto _remove_over; + + if (memcmp(pObj->meterId, pRemove->meterId, TSDB_METER_ID_LEN) != 0) { + dWarn("vid:%d sid:%d id:%s, remove ID:%s, meter ID not matched", pObj->vnode, pObj->sid, pObj->meterId, + pRemove->meterId); + goto _remove_over; + } + + if (vnodeRemoveMeterObj(pRemove->vnode, pRemove->sid) == TSDB_CODE_ACTION_IN_PROGRESS) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + goto _remove_over; + } + + dTrace("vid:%d sid:%d id:%s, meterObj is removed", pRemove->vnode, pRemove->sid, pRemove->meterId); + +_remove_over: + + pStart = (char *)malloc(128); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_REMOVE_RSP; + pMsg = pStart + 1; + + *pMsg = code; + vnodeSendMsgToMgmt(pStart); + + return 0; +} + +int vnodeProcessVPeerCfg(char *msg) { + SVPeersMsg *pMsg = (SVPeersMsg *)msg; + int vnode; + + vnode = htonl(pMsg->vnode); + if (vnode >= TSDB_MAX_VNODES) { + dError("vid:%d, vnode is out of range", vnode); + return -1; + } + + if (vnodeList[vnode].status == TSDB_STATUS_CREATING) { + dTrace("vid:%d, vnode is still under creating", vnode); + return 0; + } + + SVnodeCfg *pCfg = &pMsg->cfg; + pCfg->vgId = htonl(pCfg->vgId); + pCfg->maxSessions = htonl(pCfg->maxSessions); + pCfg->cacheBlockSize = htonl(pCfg->cacheBlockSize); + 
pCfg->cacheNumOfBlocks.totalBlocks = htonl(pCfg->cacheNumOfBlocks.totalBlocks); + pCfg->daysPerFile = htonl(pCfg->daysPerFile); + pCfg->daysToKeep1 = htonl(pCfg->daysToKeep1); + pCfg->daysToKeep2 = htonl(pCfg->daysToKeep2); + pCfg->daysToKeep = htonl(pCfg->daysToKeep); + pCfg->commitTime = htonl(pCfg->commitTime); + pCfg->blocksPerMeter = htons(pCfg->blocksPerMeter); + pCfg->rowsInFileBlock = htonl(pCfg->rowsInFileBlock); + + dTrace("vid:%d, vgroup:%d, vpeer cfg received, sessions:%d, current session:%d", vnode, pCfg->vgId, pCfg->maxSessions, + vnodeList[vnode].cfg.maxSessions); + + if (vnodeList[vnode].cfg.maxSessions == 0) { + if (pCfg->maxSessions > 0) { + return vnodeCreateVnode(vnode, pCfg, pMsg->vpeerDesc); + } + } else { + if (pCfg->maxSessions <= 0) { + vnodeRemoveVnode(vnode); + } + } + + return 0; +} + +int vnodeProcessVPeerCfgRsp(char *msg) { + STaosRsp *pRsp; + + pRsp = (STaosRsp *)msg; + + if (pRsp->code == 0) { + vnodeProcessVPeerCfg(pRsp->more); + } else { + int32_t *pint = (int32_t *)pRsp->more; + int vnode = htonl(*pint); + if (vnode < TSDB_MAX_VNODES && vnodeList[vnode].lastKey != 0) { + dError("vnode:%d not configured, it shall be empty"); + vnodeRemoveVnode(vnode); + } else { + dTrace("vnode:%d is invalid", vnode); + } + } + + return 0; +} + +int vnodeProcessVPeersMsg(char *msg) { + int code = 0; + char *pStart, *pMsg; + + code = vnodeProcessVPeerCfg(msg); + + STaosRsp * pRsp; + SVPeersMsg *pVPeersMsg = (SVPeersMsg *)msg; + + pStart = (char *)malloc(128); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_VPEERS_RSP; + pMsg = pStart + 1; + + pRsp = (STaosRsp *)pMsg; + pRsp->code = code; + memcpy(pRsp->more, pVPeersMsg->cfg.db, TSDB_DB_NAME_LEN); + + vnodeSendMsgToMgmt(pStart); + + return code; +} + +int vnodeProcessFreeVnodeRequest(char *pMsg) { + SFreeVnodeMsg *pFree; + char * pStart; + + pFree = (SFreeVnodeMsg *)pMsg; + pFree->vnode = htons(pFree->vnode); + + if (pFree->vnode < 0 || pFree->vnode >= TSDB_MAX_VNODES) { + dWarn("vid:%d out of range", pFree->vnode); + return -1; + } + + dTrace("vid:%d receive free vnode message", pFree->vnode); + vnodeRemoveVnode(pFree->vnode); + + pStart = (char *)malloc(128); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_FREE_VNODE_RSP; + pMsg = pStart + 1; + + *pMsg = 0; + vnodeSendMsgToMgmt(pStart); + + return 0; +} + +int vnodeSendVpeerCfgMsg(int vnode) { + SVpeerCfgMsg *pCfg; + char * pStart, *pMsg; + + pStart = (char *)malloc(256); + if (pStart == NULL) return -1; + + *pStart = TSDB_MSG_TYPE_VPEER_CFG; + pMsg = pStart + 1; + + pCfg = (SVpeerCfgMsg *)pMsg; + pCfg->vnode = htonl(vnode); + pMsg += sizeof(SVpeerCfgMsg); + + vnodeSendMsgToMgmt(pStart); + + return 0; +} + +int vnodeSendMeterCfgMsg(int vnode, int sid) { + SMeterCfgMsg *pCfg; + char * pStart, *pMsg; + + pStart = (char *)malloc(256); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_METER_CFG; + pMsg = pStart + 1; + + pCfg = (SMeterCfgMsg *)pMsg; + pCfg->vnode = htonl(vnode); + pCfg->sid = htonl(sid); + pMsg += sizeof(SMeterCfgMsg); + + vnodeSendMsgToMgmt(pStart); + + return 0; +} diff --git a/src/system/src/dnodeService.c b/src/system/src/dnodeService.c new file mode 100644 index 000000000000..c3eab50048ac --- /dev/null +++ b/src/system/src/dnodeService.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
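Every request handler in dnodeMgmt.c answers the management module the same way: allocate a small buffer, write the response message type into the first byte, append the result code (or a small payload struct), and hand the buffer to vnodeSendMsgToMgmt(). A minimal sketch of that pattern factored into a helper; sendSimpleRspToMgmt is a hypothetical name, not present in the source, and the 128-byte allocation simply mirrors what the handlers above do:

    #include <stdlib.h>

    extern int vnodeSendMsgToMgmt(char *msg);   /* declared in dnodeMgmt.c above */

    /* Build and send a [msgType][code] reply, as the handlers above do inline. */
    static int sendSimpleRspToMgmt(char msgType, char code) {
      char *pStart = (char *)malloc(128);
      if (pStart == NULL) return -1;

      pStart[0] = msgType;   /* e.g. TSDB_MSG_TYPE_CREATE_RSP */
      pStart[1] = code;      /* result code for the request   */
      return vnodeSendMsgToMgmt(pStart);
    }

Note that the handlers rely on the receiving side to free the buffer after the scheduled task has consumed it, which is why they do not free pStart themselves.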
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dnodeSystem.h" +#include "tglobalcfg.h" +#include "tsdb.h" +#include "vnode.h" + +/* Termination handler */ +void signal_handler(int signum, siginfo_t *sigInfo, void *context) { + if (signum == SIGUSR1) { + tsCfgDynamicOptions("debugFlag 135"); + return; + } + if (signum == SIGUSR2) { + tsCfgDynamicOptions("resetlog"); + return; + } + syslog(LOG_INFO, "Shut down signal is %d", signum); + syslog(LOG_INFO, "Shutting down TDengine service..."); + // clean the system. + dPrint("shut down signal is %d, sender PID:%d", signum, sigInfo->si_pid); + dnodeCleanUpSystem(); + // close the syslog + syslog(LOG_INFO, "Shut down TDengine service successfully"); + dPrint("TDengine is shut down!"); + closelog(); + exit(EXIT_SUCCESS); +} + +int main(int argc, char *argv[]) { + // Set global configuration file + for (int i = 1; i < argc; ++i) { + if (strcmp(argv[i], "-c") == 0) { + if (i < argc - 1) { + strcpy(configDir, argv[++i]); + } else { + printf("'-c' requires a parameter, default:%s\n", configDir); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-V") == 0) { + printf("%s %s\n", version, compatible_version); + return 0; + } + } + + /* Set termination handler. */ + struct sigaction act; + act.sa_flags = SA_SIGINFO; + act.sa_sigaction = signal_handler; + sigaction(SIGTERM, &act, NULL); + sigaction(SIGHUP, &act, NULL); + sigaction(SIGINT, &act, NULL); + sigaction(SIGUSR1, &act, NULL); + sigaction(SIGUSR2, &act, NULL); + // sigaction(SIGABRT, &act, NULL); + + // Open /var/log/syslog file to record information. + openlog("TDengine:", LOG_PID | LOG_CONS | LOG_NDELAY, LOG_LOCAL1); + syslog(LOG_INFO, "Starting TDengine service..."); + + // Initialize the system + if (dnodeInitSystem() < 0) { + syslog(LOG_ERR, "Error initialize TDengine system"); + closelog(); + + dnodeCleanUpSystem(); + exit(EXIT_FAILURE); + } + + syslog(LOG_INFO, "Started TDengine service successfully."); + + while (1) { + sleep(1000); + } +} diff --git a/src/system/src/dnodeSystem.c b/src/system/src/dnodeSystem.c new file mode 100644 index 000000000000..c6dbaddc9442 --- /dev/null +++ b/src/system/src/dnodeSystem.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
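main() above installs signal_handler() with SA_SIGINFO for both the termination signals and the two user signals, so log verbosity can be changed on a running server without a restart: SIGUSR1 raises the debug flag and SIGUSR2 resets the log. A minimal sketch of driving that from another process; requestVerboseLogging is a hypothetical helper, not part of the source:

    #include <signal.h>
    #include <sys/types.h>

    /* Ask a running taosd (whose pid is known) to raise its debug level; the
     * handler above reacts to SIGUSR1 with tsCfgDynamicOptions("debugFlag 135"). */
    static int requestVerboseLogging(pid_t taosdPid) {
      return kill(taosdPid, SIGUSR1);
    }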
+ */ + +#include +#include +#include +#include +#include + +#include "mgmt.h" +#include "vnode.h" + +#include "dnodeSystem.h" +#include "httpSystem.h" +#include "monitorSystem.h" +#include "tcrc32c.h" +#include "tglobalcfg.h" +#include "vnode.h" + +SModule tsModule[TSDB_MOD_MAX]; +uint32_t tsModuleStatus; +pthread_mutex_t dmutex; +extern int vnodeSelectReqNum; +extern int vnodeInsertReqNum; +bool tsDnodeStopping = false; + +void dnodeCountRequest(SCountInfo *info); + +void dnodeInitModules() { + tsModule[TSDB_MOD_HTTP].name = "http"; + tsModule[TSDB_MOD_HTTP].initFp = httpInitSystem; + tsModule[TSDB_MOD_HTTP].cleanUpFp = httpCleanUpSystem; + tsModule[TSDB_MOD_HTTP].startFp = httpStartSystem; + tsModule[TSDB_MOD_HTTP].stopFp = httpStopSystem; + tsModule[TSDB_MOD_HTTP].num = tsEnableHttpModule ? -1 : 0; + tsModule[TSDB_MOD_HTTP].curNum = 0; + tsModule[TSDB_MOD_HTTP].equalVnodeNum = 0; + + tsModule[TSDB_MOD_MONITOR].name = "monitor"; + tsModule[TSDB_MOD_MONITOR].initFp = monitorInitSystem; + tsModule[TSDB_MOD_MONITOR].cleanUpFp = monitorCleanUpSystem; + tsModule[TSDB_MOD_MONITOR].startFp = monitorStartSystem; + tsModule[TSDB_MOD_MONITOR].stopFp = monitorStopSystem; + tsModule[TSDB_MOD_MONITOR].num = tsEnableMonitorModule ? -1 : 0; + tsModule[TSDB_MOD_MONITOR].curNum = 0; + tsModule[TSDB_MOD_MONITOR].equalVnodeNum = 0; +} + +void dnodeCleanUpSystem() { + if (tsDnodeStopping) return; + tsDnodeStopping = true; + + for (int mod = 0; mod < TSDB_MOD_MAX; ++mod) { + if (tsModule[mod].num != 0 && tsModule[mod].stopFp) (*tsModule[mod].stopFp)(); + if (tsModule[mod].num != 0 && tsModule[mod].cleanUpFp) (*tsModule[mod].cleanUpFp)(); + } + + mgmtCleanUpSystem(); + vnodeCleanUpVnodes(); + + taosCloseLogger(); +} + +void taosCreateTierDirectory() { + char fileName[128]; + + sprintf(fileName, "%s/tsdb", tsDirectory); + mkdir(fileName, 0755); + + sprintf(fileName, "%s/data", tsDirectory); + mkdir(fileName, 0755); +} + +int dnodeInitSystem() { + char temp[128]; + struct stat dirstat; + + taosResolveCRC(); + + tsRebootTime = taosGetTimestampSec(); + tscEmbedded = 1; + + // Read global configuration. 
+ tsReadGlobalLogConfig(); + + if (stat(logDir, &dirstat) < 0) mkdir(logDir, 0755); + + sprintf(temp, "%s/taosdlog", logDir); + if (taosInitLog(temp, tsNumOfLogLines, 1) < 0) printf("failed to init log file\n"); + + if (!tsReadGlobalConfig()) { // TODO : Change this function + tsPrintGlobalConfig(); + dError("TDengine read global config failed"); + return -1; + } + + strcpy(tsDirectory, dataDir); + taosCreateTierDirectory(); + + sprintf(mgmtDirectory, "%s/mgmt", tsDirectory); + sprintf(tsDirectory, "%s/tsdb", dataDir); + + tsPrintGlobalConfig(); + dPrint("Server IP address is:%s", tsInternalIp); + + signal(SIGPIPE, SIG_IGN); + + dnodeInitModules(); + pthread_mutex_init(&dmutex, NULL); + + dPrint("starting to initialize TDengine engine ..."); + + for (int mod = 0; mod < TSDB_MOD_MAX; ++mod) { + if (tsModule[mod].num != 0 && tsModule[mod].initFp) { + if ((*tsModule[mod].initFp)() != 0) { + dError("TDengine initialization failed"); + return -1; + } + } + } + + if (vnodeInitSystem() != 0) { + dError("TDengine vnodes initialization failed"); + return -1; + } + + if (mgmtInitSystem() != 0) { + dError("TDengine mgmt initialization failed"); + return -1; + } + + monitorCountReqFp = dnodeCountRequest; + + for (int mod = 0; mod < TSDB_MOD_MAX; ++mod) { + if (tsModule[mod].num != 0 && tsModule[mod].startFp) { + if ((*tsModule[mod].startFp)() != 0) { + dError("failed to start TDengine module:%d", mod); + return -1; + } + } + } + + dPrint("TDengine is initialized successfully"); + + return 0; +} + +void dnodeCountRequest(SCountInfo *info) { + httpGetReqCount(&info->httpReqNum); + info->selectReqNum = __sync_fetch_and_and(&vnodeSelectReqNum, 0); + info->insertReqNum = __sync_fetch_and_and(&vnodeInsertReqNum, 0); +} diff --git a/src/system/src/mgmtAcct.c b/src/system/src/mgmtAcct.c new file mode 100644 index 000000000000..6ad2b3a46175 --- /dev/null +++ b/src/system/src/mgmtAcct.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
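dnodeCountRequest() above drains vnodeSelectReqNum and vnodeInsertReqNum with __sync_fetch_and_and(&counter, 0), which atomically returns the current value and clears it, so the monitor module sees per-interval counts rather than running totals. A small self-contained sketch of the same read-and-reset pattern; the names are illustrative only:

    /* Writers bump the counter on every handled request; a periodic reader
     * fetches the accumulated value and resets it to zero in one atomic step. */
    static int demoRequestCounter = 0;

    static void demoOnRequest(void) {
      __sync_fetch_and_add(&demoRequestCounter, 1);
    }

    static int demoDrainCounter(void) {
      /* returns the number of requests seen since the last drain */
      return __sync_fetch_and_and(&demoRequestCounter, 0);
    }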
+ */ + +#include + +#include "mgmt.h" +#include "tschemautil.h" + +SAcctObj acctObj; + +int mgmtAddDbIntoAcct(SAcctObj *pAcct, SDbObj *pDb) { + pthread_mutex_lock(&pAcct->mutex); + pDb->next = pAcct->pHead; + pDb->prev = NULL; + + if (pAcct->pHead) pAcct->pHead->prev = pDb; + + pAcct->pHead = pDb; + pAcct->acctInfo.numOfDbs++; + pthread_mutex_unlock(&pAcct->mutex); + + return 0; +} + +int mgmtRemoveDbFromAcct(SAcctObj *pAcct, SDbObj *pDb) { + pthread_mutex_lock(&pAcct->mutex); + if (pDb->prev) pDb->prev->next = pDb->next; + + if (pDb->next) pDb->next->prev = pDb->prev; + + if (pDb->prev == NULL) pAcct->pHead = pDb->next; + + pAcct->acctInfo.numOfDbs--; + pthread_mutex_unlock(&pAcct->mutex); + + return 0; +} + +int mgmtAddUserIntoAcct(SAcctObj *pAcct, SUserObj *pUser) { + pthread_mutex_lock(&pAcct->mutex); + pUser->next = pAcct->pUser; + pUser->prev = NULL; + + if (pAcct->pUser) pAcct->pUser->prev = pUser; + + pAcct->pUser = pUser; + pAcct->acctInfo.numOfUsers++; + pthread_mutex_unlock(&pAcct->mutex); + + return 0; +} + +int mgmtRemoveUserFromAcct(SAcctObj *pAcct, SUserObj *pUser) { + pthread_mutex_lock(&pAcct->mutex); + if (pUser->prev) pUser->prev->next = pUser->next; + + if (pUser->next) pUser->next->prev = pUser->prev; + + if (pUser->prev == NULL) pAcct->pUser = pUser->next; + + pAcct->acctInfo.numOfUsers--; + pthread_mutex_unlock(&pAcct->mutex); + + return 0; +} + +int mgmtAddConnIntoAcct(SConnObj *pConn) { + SAcctObj *pAcct = pConn->pAcct; + if (pAcct == NULL) return 0; + + pthread_mutex_lock(&pAcct->mutex); + + assert(pConn != pAcct->pConn); + + pConn->next = pAcct->pConn; + pConn->prev = NULL; + + if (pAcct->pConn) pAcct->pConn->prev = pConn; + + pAcct->pConn = pConn; + pAcct->acctInfo.numOfConns++; + + pthread_mutex_unlock(&pAcct->mutex); + + return 0; +} + +int mgmtRemoveConnFromAcct(SConnObj *pConn) { + SAcctObj *pAcct = pConn->pAcct; + if (pAcct == NULL) return 0; + + pthread_mutex_lock(&pAcct->mutex); + + if (pConn->prev) pConn->prev->next = pConn->next; + + if (pConn->next) pConn->next->prev = pConn->prev; + + if (pConn->prev == NULL) pAcct->pConn = pConn->next; + + pAcct->acctInfo.numOfConns--; + // pAcct->numOfUsers--; + + if (pConn->pQList) { + pAcct->acctInfo.numOfQueries -= pConn->pQList->numOfQueries; + pAcct->acctInfo.numOfStreams -= pConn->pSList->numOfStreams; + } + + pthread_mutex_unlock(&pAcct->mutex); + + pConn->next = NULL; + pConn->prev = NULL; + + return 0; +} + +void mgmtCheckAcct() { + SAcctObj *pAcct = &acctObj; + pAcct->acctId = 0; + strcpy(pAcct->user, "root"); + + mgmtCreateUser(pAcct, "root", "taosdata"); + mgmtCreateUser(pAcct, "monitor", tsInternalPass); + mgmtCreateUser(pAcct, "_root", tsInternalPass); +} diff --git a/src/system/src/mgmtConn.c b/src/system/src/mgmtConn.c new file mode 100644 index 000000000000..c5621bd34326 --- /dev/null +++ b/src/system/src/mgmtConn.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "mgmt.h" +#include +#include "taosmsg.h" +#include "tschemautil.h" + +typedef struct { + char user[TSDB_METER_ID_LEN]; + uint64_t stime; + uint32_t ip; + short port; +} SConnInfo; + +typedef struct { + int numOfConns; + int index; + SConnInfo connInfo[]; +} SConnShow; + +int mgmtGetConns(SShowObj *pShow, SConnObj *pConn) { + SAcctObj * pAcct = pConn->pAcct; + SConnShow *pConnShow; + + pthread_mutex_lock(&pAcct->mutex); + + pConnShow = malloc(sizeof(SConnInfo) * pAcct->acctInfo.numOfConns + sizeof(SConnShow)); + pConnShow->index = 0; + pConnShow->numOfConns = 0; + + if (pAcct->acctInfo.numOfConns > 0) { + pConn = pAcct->pConn; + SConnInfo *pConnInfo = pConnShow->connInfo; + + while (pConn) { + strcpy(pConnInfo->user, pConn->pUser->user); + pConnInfo->ip = pConn->ip; + pConnInfo->port = pConn->port; + pConnInfo->stime = pConn->stime; + + pConnShow->numOfConns++; + pConnInfo++; + pConn = pConn->next; + } + } + + pthread_mutex_unlock(&pAcct->mutex); + + // sorting based on useconds + + pShow->pNode = pConnShow; + + return 0; +} + +int mgmtGetConnsMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + pShow->bytes[cols] = TSDB_METER_NAME_LEN; + SSchema *pSchema = tsGetSchema(pMeta); + + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "user"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_IPv4ADDR_LEN + 6; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "ip:port"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "login time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + pShow->numOfRows = 1000000; + pShow->pNode = NULL; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + mgmtGetConns(pShow, pConn); + return 0; +} + +int mgmtRetrieveConns(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + char *pWrite; + int cols = 0; + + SConnShow *pConnShow = (SConnShow *)pShow->pNode; + + if (rows > pConnShow->numOfConns - pConnShow->index) rows = pConnShow->numOfConns - pConnShow->index; + + while (numOfRows < rows) { + SConnInfo *pNode = pConnShow->connInfo + pConnShow->index; + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, pNode->user); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + uint32_t ip = pNode->ip; + sprintf(pWrite, "%d.%d.%d.%d:%hu", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24, htons(pNode->port)); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pNode->stime; + cols++; + + numOfRows++; + pConnShow->index++; + } + + if (numOfRows == 0) { + tfree(pConnShow); + } + + pShow->numOfReads += numOfRows; + return numOfRows; +} diff --git a/src/system/src/mgmtDb.c b/src/system/src/mgmtDb.c new file mode 100644 index 000000000000..143828f30de4 --- /dev/null +++ b/src/system/src/mgmtDb.c @@ -0,0 +1,792 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
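mgmtGetConnsMeta() above, like the other *Meta builders in this patch, lays each result row out as fixed-width columns: offset[0] is 0, every later offset is the previous offset plus that column's byte width, and rowSize is the last offset plus the last width. The matching retrieve functions then address cell (row, col) at data + offset[col] * rows + bytes[col] * row, i.e. column-major within one retrieved batch. A tiny sketch of that arithmetic, with illustrative names only:

    /* Compute column offsets and the total row size for a fixed-width layout,
     * mirroring the loop used by the *Meta functions above. */
    static int demoComputeOffsets(const int *bytes, int *offset, int cols) {
      offset[0] = 0;
      for (int i = 1; i < cols; ++i) offset[i] = offset[i - 1] + bytes[i - 1];
      return offset[cols - 1] + bytes[cols - 1];   /* rowSize */
    }

    /* Address of one cell inside a retrieve buffer holding `rows` rows. */
    static char *demoCellAddr(char *data, const int *offset, const int *bytes,
                              int rows, int row, int col) {
      return data + offset[col] * rows + bytes[col] * row;
    }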
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "mgmt.h" +#include +#include "tschemautil.h" + +void *dbSdb = NULL; +int tsDbUpdateSize; + +void *(*mgmtDbActionFp[SDB_MAX_ACTION_TYPES])(void *row, char *str, int size, int *ssize); +void *mgmtDbActionInsert(void *row, char *str, int size, int *ssize); +void *mgmtDbActionDelete(void *row, char *str, int size, int *ssize); +void *mgmtDbActionUpdate(void *row, char *str, int size, int *ssize); +void *mgmtDbActionEncode(void *row, char *str, int size, int *ssize); +void *mgmtDbActionDecode(void *row, char *str, int size, int *ssize); +void *mgmtDbActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtDbActionBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtDbActionAfterBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtDbActionReset(void *row, char *str, int size, int *ssize); +void *mgmtDbActionDestroy(void *row, char *str, int size, int *ssize); + +void mgmtDbActionInit() { + mgmtDbActionFp[SDB_TYPE_INSERT] = mgmtDbActionInsert; + mgmtDbActionFp[SDB_TYPE_DELETE] = mgmtDbActionDelete; + mgmtDbActionFp[SDB_TYPE_UPDATE] = mgmtDbActionUpdate; + mgmtDbActionFp[SDB_TYPE_ENCODE] = mgmtDbActionEncode; + mgmtDbActionFp[SDB_TYPE_DECODE] = mgmtDbActionDecode; + mgmtDbActionFp[SDB_TYPE_BEFORE_BATCH_UPDATE] = mgmtDbActionBeforeBatchUpdate; + mgmtDbActionFp[SDB_TYPE_BATCH_UPDATE] = mgmtDbActionBatchUpdate; + mgmtDbActionFp[SDB_TYPE_AFTER_BATCH_UPDATE] = mgmtDbActionAfterBatchUpdate; + mgmtDbActionFp[SDB_TYPE_RESET] = mgmtDbActionReset; + mgmtDbActionFp[SDB_TYPE_DESTROY] = mgmtDbActionDestroy; +} + +void *mgmtDbAction(char action, void *row, char *str, int size, int *ssize) { + if (mgmtDbActionFp[action] != NULL) { + return (*(mgmtDbActionFp[action]))(row, str, size, ssize); + } + return NULL; +} + +void mgmtGetAcctStr(char *src, char *dest) { + char *pos = strstr(src, TS_PATH_DELIMITER); + while ((pos != NULL) && (*src != *pos)) { + *dest = *src; + src++; + dest++; + } + + *dest = 0; +} + +int mgmtInitDbs() { + void * pNode = NULL; + SDbObj *pDb = NULL; + + mgmtDbActionInit(); + + dbSdb = sdbOpenTable(tsMaxDbs, sizeof(SDbObj), "db", SDB_KEYTYPE_STRING, mgmtDirectory, mgmtDbAction); + if (dbSdb == NULL) { + mError("failed to init db data"); + return -1; + } + + while (1) { + pNode = sdbFetchRow(dbSdb, pNode, (void **)&pDb); + if (pDb == NULL) break; + + pDb->pHead = NULL; + pDb->pTail = NULL; + pDb->prev = NULL; + pDb->next = NULL; + pDb->numOfTables = 0; + pDb->numOfVgroups = 0; + pDb->numOfMetrics = 0; + pDb->vgStatus = TSDB_VG_STATUS_READY; + pDb->vgTimer = NULL; + pDb->pMetric = NULL; + mgmtAddDbIntoAcct(&acctObj, pDb); + } + + SDbObj tObj; + tsDbUpdateSize = tObj.updateEnd - (char *)&tObj; + + mTrace("db data is initialized"); + return 0; +} + +SDbObj *mgmtGetDb(char *db) { return (SDbObj *)sdbGetRow(dbSdb, db); } + +SDbObj *mgmtGetDbByMeterId(char *meterId) { + char db[TSDB_METER_ID_LEN], *pos; + + pos = strstr(meterId, TS_PATH_DELIMITER); + pos = strstr(pos + 1, TS_PATH_DELIMITER); + 
memset(db, 0, sizeof(db)); + strncpy(db, meterId, pos - meterId); + + return (SDbObj *)sdbGetRow(dbSdb, db); +} + +int mgmtCheckDbParams(SCreateDbMsg *pCreate) { + // assign default parameters + if (pCreate->maxSessions < 0) pCreate->maxSessions = tsSessionsPerVnode; // + if (pCreate->cacheBlockSize < 0) pCreate->cacheBlockSize = tsCacheBlockSize; // + if (pCreate->daysPerFile < 0) pCreate->daysPerFile = tsDaysPerFile; // + if (pCreate->daysToKeep < 0) pCreate->daysToKeep = tsDaysToKeep; // + if (pCreate->daysToKeep1 < 0) pCreate->daysToKeep1 = pCreate->daysToKeep; // + if (pCreate->daysToKeep2 < 0) pCreate->daysToKeep2 = pCreate->daysToKeep; // + if (pCreate->commitTime < 0) pCreate->commitTime = tsCommitTime; // + if (pCreate->compression < 0) pCreate->compression = tsCompression; // + if (pCreate->commitLog < 0) pCreate->commitLog = tsCommitLog; + if (pCreate->replications < 0) pCreate->replications = 1; // + if (pCreate->rowsInFileBlock < 0) pCreate->rowsInFileBlock = tsRowsInFileBlock; // + if (pCreate->cacheNumOfBlocks.fraction < 0) pCreate->cacheNumOfBlocks.fraction = tsAverageCacheBlocks; // + pCreate->replications = 1; + + if (pCreate->commitLog < 0 || pCreate->commitLog > 1) { + mTrace("invalid db option commitLog: %d", pCreate->commitLog); + return TSDB_CODE_INVALID_OPTION; + } + + if (pCreate->daysPerFile < TSDB_FILE_MIN_PARTITION_RANGE || pCreate->daysPerFile > TSDB_FILE_MAX_PARTITION_RANGE) { + mTrace("invalid db option daysPerFile: %d valid range: %d--%d", pCreate->daysPerFile, TSDB_FILE_MIN_PARTITION_RANGE, + TSDB_FILE_MAX_PARTITION_RANGE); + return TSDB_CODE_INVALID_OPTION; + } + + if (pCreate->daysToKeep1 > pCreate->daysToKeep2 || pCreate->daysToKeep2 > pCreate->daysToKeep) { + mTrace("invalid db option daystokeep1: %d, daystokeep2: %d, daystokeep: %d", pCreate->daysToKeep1, + pCreate->daysToKeep2, pCreate->daysToKeep); + return TSDB_CODE_INVALID_OPTION; + } + + if (pCreate->daysToKeep1 < TSDB_FILE_MIN_PARTITION_RANGE || pCreate->daysToKeep1 < pCreate->daysPerFile) { + mTrace("invalid db option daystokeep: %d", pCreate->daysToKeep); + return TSDB_CODE_INVALID_OPTION; + } + if (pCreate->rowsInFileBlock < TSDB_MIN_ROWS_IN_FILEBLOCK || pCreate->rowsInFileBlock > TSDB_MAX_ROWS_IN_FILEBLOCK) { + mTrace("invalid db option rowsInFileBlock: %d valid range: %d--%d", pCreate->rowsInFileBlock, + TSDB_MIN_ROWS_IN_FILEBLOCK, TSDB_MAX_ROWS_IN_FILEBLOCK); + return TSDB_CODE_INVALID_OPTION; + } + if (pCreate->cacheBlockSize < TSDB_MIN_CACHE_BLOCK_SIZE || pCreate->cacheBlockSize > TSDB_MAX_CACHE_BLOCK_SIZE) { + mTrace("invalid db option cacheBlockSize: %d valid range: %d--%d", pCreate->cacheBlockSize, + TSDB_MIN_CACHE_BLOCK_SIZE, TSDB_MAX_CACHE_BLOCK_SIZE); + return TSDB_CODE_INVALID_OPTION; + } + if (pCreate->maxSessions < TSDB_MIN_TABLES_PER_VNODE || pCreate->maxSessions > TSDB_MAX_TABLES_PER_VNODE) { + mTrace("invalid db option maxSessions: %d valid range: %d--%d", pCreate->maxSessions, TSDB_MIN_TABLES_PER_VNODE, + TSDB_MAX_TABLES_PER_VNODE); + return TSDB_CODE_INVALID_OPTION; + } + + if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) { + mTrace("invalid db option timePrecision: %d valid value: %d,%d", pCreate->precision, TSDB_TIME_PRECISION_MILLI, + TSDB_TIME_PRECISION_MICRO); + return TSDB_CODE_INVALID_OPTION; + } + + if (pCreate->cacheNumOfBlocks.fraction < 0 || pCreate->cacheNumOfBlocks.fraction > TSDB_MAX_AVG_BLOCKS) { + mTrace("invalid db option ablocks: %d valid value: %d,%d", pCreate->precision, 0, TSDB_MAX_AVG_BLOCKS); + 
return TSDB_CODE_INVALID_OPTION; + } else { + pCreate->cacheNumOfBlocks.totalBlocks = (int32_t)(pCreate->cacheNumOfBlocks.fraction * pCreate->maxSessions); + } + + if (pCreate->cacheNumOfBlocks.totalBlocks > TSDB_MAX_CACHE_BLOCKS) { + mTrace("invalid db option cacheNumOfBlocks: %d valid range: %d", pCreate->cacheNumOfBlocks.totalBlocks, + TSDB_MAX_CACHE_BLOCKS); + return TSDB_CODE_INVALID_OPTION; + } + + if (pCreate->commitTime < TSDB_MIN_COMMIT_TIME_INTERVAL || pCreate->commitTime > TSDB_MAX_COMMIT_TIME_INTERVAL) { + mTrace("invalid db option commitTime: %d valid range: %d-%d", pCreate->commitTime, TSDB_MIN_COMMIT_TIME_INTERVAL, + TSDB_MAX_COMMIT_TIME_INTERVAL); + return TSDB_CODE_INVALID_OPTION; + } + if (pCreate->compression > TSDB_MAX_COMPRESSION_LEVEL) { + mTrace("invalid db option compression: %d", pCreate->compression, TSDB_MIN_COMMIT_TIME_INTERVAL, + TSDB_MAX_COMMIT_TIME_INTERVAL); + return TSDB_CODE_INVALID_OPTION; + } + + if (pCreate->blocksPerMeter < 0) pCreate->blocksPerMeter = tsNumOfBlocksPerMeter; + if (pCreate->blocksPerMeter > pCreate->cacheNumOfBlocks.totalBlocks * 3 / 4) { + pCreate->blocksPerMeter = pCreate->cacheNumOfBlocks.totalBlocks * 3 / 4; + } + if (pCreate->blocksPerMeter < 4) pCreate->blocksPerMeter = 4; + + pCreate->maxSessions++; + + return TSDB_CODE_SUCCESS; +} + +int mgmtCreateDb(SAcctObj *pAcct, SCreateDbMsg *pCreate) { + SDbObj *pDb; + + int numOfDbs = sdbGetNumOfRows(dbSdb); + if (numOfDbs >= tsMaxDbs) { + mWarn("numOfDbs:%d, exceed tsMaxDbs:%d", numOfDbs, tsMaxDbs); + return TSDB_CODE_TOO_MANY_DATABSES; + } + + pDb = (SDbObj *)sdbGetRow(dbSdb, pCreate->db); + if (pDb != NULL) { + return TSDB_CODE_DB_ALREADY_EXIST; + } + + int code = mgmtCheckDbParams(pCreate); + if (code != TSDB_CODE_SUCCESS) return code; + + assert(pCreate->daysToKeep1 <= pCreate->daysToKeep2 && pCreate->daysToKeep2 <= pCreate->daysToKeep); + + pDb = malloc(sizeof(SDbObj)); + memset(pDb, 0, sizeof(SDbObj)); + strcpy(pDb->name, pCreate->db); + strcpy(pCreate->acct, pAcct->user); + pDb->createdTime = taosGetTimestampMs(); + pDb->cfg = *pCreate; + + if (sdbInsertRow(dbSdb, pDb, 0) < 0) { + code = TSDB_CODE_SDB_ERROR; + tfree(pDb); + } + + return code; +} + +int mgmtUpdateDb(SDbObj *pDb) { return sdbUpdateRow(dbSdb, pDb, tsDbUpdateSize, 1); } + +int mgmtSetDbDropping(SDbObj *pDb) { + if (pDb->dropStatus == TSDB_DB_STATUS_DROP_FROM_SDB) return 0; + + SVgObj *pVgroup = pDb->pHead; + while (pVgroup != NULL) { + SDnodeObj *pDnode = &dnodeObj; + if (pDnode == NULL) continue; + + SVnodeLoad *pVload = &pDnode->vload[pVgroup->vnodeGid[0].vnode]; + if (pVload->dropStatus != TSDB_VN_STATUS_DROPPING) { + pVload->dropStatus = TSDB_VN_STATUS_DROPPING; + mPrint("vnode:%d db:%s set to dropping status", pVgroup->vnodeGid[0].vnode, pDb->name); + } + mgmtSendFreeVnodeMsg(pVgroup->vnodeGid[0].vnode); + pVgroup = pVgroup->next; + } + + if (pDb->dropStatus == TSDB_DB_STATUS_DROPPING) return 0; + + pDb->dropStatus = TSDB_DB_STATUS_DROPPING; + if (mgmtUpdateDb(pDb) < 0) { + mError("db:%s drop failed, db sdb update error", pDb->name); + return TSDB_CODE_SDB_ERROR; + } + + mPrint("db:%s set to dropping status", pDb->name); + return 0; +} + +bool mgmtCheckDropDbFinished(SDbObj *pDb) { + SVgObj *pVgroup = pDb->pHead; + while (pVgroup) { + SDnodeObj *pDnode = &dnodeObj; + + if (pDnode->status == TSDB_STATUS_OFFLINE) continue; + + SVnodeLoad *pVload = &pDnode->vload[pVgroup->vnodeGid[0].vnode]; + if (pVload->dropStatus == TSDB_VN_STATUS_DROPPING) { + mTrace("dnode:0x%x vnode:%d db:%s wait dropping", pDnode->privateIp, 
pVgroup->vnodeGid[0].vnode, pDb->name); + return false; + } + + pVgroup = pVgroup->next; + } + + mPrint("db:%s all vnodes drop finished", pDb->name); + return true; +} + +void mgmtDropDbFromSdb(SDbObj *pDb) { + while (pDb->pHead) mgmtDropVgroup(pDb, pDb->pHead); + + STabObj *pMetric = pDb->pMetric; + while (pMetric) { + STabObj *pNext = pMetric->next; + mgmtDropMeter(pDb, pMetric->meterId, 0); + pMetric = pNext; + } + + mPrint("db:%s all meters drop finished", pDb->name); + sdbDeleteRow(dbSdb, pDb); + mPrint("db:%s database drop finished", pDb->name); +} + +int mgmtDropDb(SDbObj *pDb) { + if (pDb->dropStatus == TSDB_DB_STATUS_DROPPING) { + bool finished = mgmtCheckDropDbFinished(pDb); + if (!finished) return TSDB_CODE_ACTION_IN_PROGRESS; + + // don't sync this action + pDb->dropStatus = TSDB_DB_STATUS_DROP_FROM_SDB; + mgmtDropDbFromSdb(pDb); + return 0; + } else { + int code = mgmtSetDbDropping(pDb); + if (code != 0) return code; + return TSDB_CODE_ACTION_IN_PROGRESS; + } +} + +int mgmtDropDbByName(SAcctObj *pAcct, char *name) { + SDbObj *pDb; + pDb = (SDbObj *)sdbGetRow(dbSdb, name); + if (pDb == NULL) { + mWarn("db:%s is not there", name); + // return TSDB_CODE_INVALID_DB; + return 0; + } + + if (taosCheckDbName(pDb->name, tsMonitorDbName)) return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + + return mgmtDropDb(pDb); +} + +void mgmtMonitorDbDrop(void *unused, void *unusedt) { + void * pNode = NULL; + SDbObj *pDb = NULL; + + while (1) { + pNode = sdbFetchRow(dbSdb, pNode, (void **)&pDb); + if (pDb == NULL) break; + if (pDb->dropStatus != TSDB_DB_STATUS_DROPPING) continue; + mgmtDropDb(pDb); + break; + } +} + +int mgmtAlterDb(SAcctObj *pAcct, SAlterDbMsg *pAlter) { + SDbObj *pDb; + int code = TSDB_CODE_SUCCESS; + + pDb = (SDbObj *)sdbGetRow(dbSdb, pAlter->db); + if (pDb == NULL) { + mTrace("db:%s is not exist", pAlter->db); + return TSDB_CODE_INVALID_DB; + } + + if (pAlter->daysToKeep > 0) { + mTrace("db:%s daysToKeep:%d change to %d", pDb->name, pDb->cfg.daysToKeep, pAlter->daysToKeep); + pDb->cfg.daysToKeep = pAlter->daysToKeep; + } + + if (sdbUpdateRow(dbSdb, pDb, tsDbUpdateSize, 1) < 0) { + return TSDB_CODE_SDB_ERROR; + } + + return code; +} + +int mgmtUseDb(SConnObj *pConn, char *name) { + SDbObj *pDb; + int code = TSDB_CODE_INVALID_DB; + + pDb = mgmtGetDb(name); + if (pDb) { + pConn->pDb = pDb; + code = 0; + } + + return code; +} + +int mgmtAddVgroupIntoDb(SDbObj *pDb, SVgObj *pVgroup) { + pVgroup->next = pDb->pHead; + pVgroup->prev = NULL; + + if (pDb->pHead) pDb->pHead->prev = pVgroup; + + if (pDb->pTail == NULL) pDb->pTail = pVgroup; + + pDb->pHead = pVgroup; + pDb->numOfVgroups++; + + return 0; +} + +int mgmtAddVgroupIntoDbTail(SDbObj *pDb, SVgObj *pVgroup) { + pVgroup->next = NULL; + pVgroup->prev = pDb->pTail; + + if (pDb->pTail) pDb->pTail->next = pVgroup; + + if (pDb->pHead == NULL) pDb->pHead = pVgroup; + + pDb->pTail = pVgroup; + pDb->numOfVgroups++; + + return 0; +} + +int mgmtRemoveVgroupFromDb(SDbObj *pDb, SVgObj *pVgroup) { + if (pVgroup->prev) pVgroup->prev->next = pVgroup->next; + + if (pVgroup->next) pVgroup->next->prev = pVgroup->prev; + + if (pVgroup->prev == NULL) pDb->pHead = pVgroup->next; + + if (pVgroup->next == NULL) pDb->pTail = pVgroup->prev; + + pDb->numOfVgroups--; + + return 0; +} + +int mgmtMoveVgroupToTail(SDbObj *pDb, SVgObj *pVgroup) { + mgmtRemoveVgroupFromDb(pDb, pVgroup); + mgmtAddVgroupIntoDbTail(pDb, pVgroup); + + return 0; +} + +int mgmtMoveVgroupToHead(SDbObj *pDb, SVgObj *pVgroup) { + mgmtRemoveVgroupFromDb(pDb, pVgroup); + mgmtAddVgroupIntoDb(pDb, 
pVgroup); + + return 0; +} + +int mgmtAddMetricIntoDb(SDbObj *pDb, STabObj *pMetric) { + pMetric->next = pDb->pMetric; + pMetric->prev = NULL; + + if (pDb->pMetric) pDb->pMetric->prev = pMetric; + + pDb->pMetric = pMetric; + pDb->numOfMetrics++; + + return 0; +} + +int mgmtRemoveMetricFromDb(SDbObj *pDb, STabObj *pMetric) { + if (pMetric->prev) pMetric->prev->next = pMetric->next; + + if (pMetric->next) pMetric->next->prev = pMetric->prev; + + if (pMetric->prev == NULL) pDb->pMetric = pMetric->next; + + pDb->numOfMetrics--; + + if (pMetric->pSkipList != NULL) { + tSkipListDestroy(&pMetric->pSkipList); + } + return 0; +} + +int mgmtShowTables(SAcctObj *pAcct, char *db) { + int code; + + code = 0; + + return code; +} + +void mgmtCleanUpDbs() { sdbCloseTable(dbSdb); } + +int mgmtGetDbMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = TSDB_DB_NAME_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "name"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "created time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "ntables"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + +#ifndef __CLOUD_VERSION__ + if (strcmp(pConn->pAcct->user, "root") == 0) { +#endif + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "vgroups"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; +#ifndef __CLOUD_VERSION__ + } +#endif + +#ifndef __CLOUD_VERSION__ + if (strcmp(pConn->pAcct->user, "root") == 0) { +#endif + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "replica"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "days"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; +#ifndef __CLOUD_VERSION__ + } +#endif + + pShow->bytes[cols] = 24; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "keep1,keep2,keep(D)"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + +#ifndef __CLOUD_VERSION__ + if (strcmp(pConn->pAcct->user, "root") == 0) { +#endif + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "tables"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "rows"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "cache(b)"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_FLOAT; + strcpy(pSchema[cols].name, "ablocks"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "tblocks"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "ctime(s)"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + 
pShow->bytes[cols] = 1; + pSchema[cols].type = TSDB_DATA_TYPE_TINYINT; + strcpy(pSchema[cols].name, "clog"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 1; + pSchema[cols].type = TSDB_DATA_TYPE_TINYINT; + strcpy(pSchema[cols].name, "comp"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; +#ifndef __CLOUD_VERSION__ + } +#endif + + pShow->bytes[cols] = 3; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "time precision"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 10; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "status"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + // pShow->numOfRows = sdbGetNumOfRows (dbSdb); + pShow->numOfRows = pConn->pAcct->acctInfo.numOfDbs; + pShow->pNode = pConn->pAcct->pHead; + + return 0; +} + +char *mgmtGetDbStr(char *src) { + char *pos = strstr(src, TS_PATH_DELIMITER); + + return ++pos; +} + +int mgmtRetrieveDbs(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + SDbObj *pDb = NULL; + char * pWrite; + int cols = 0; + + while (numOfRows < rows) { + pDb = (SDbObj *)pShow->pNode; + if (pDb == NULL) break; + pShow->pNode = (void *)pDb->next; + + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, mgmtGetDbStr(pDb->name)); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pDb->createdTime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pDb->numOfTables; + cols++; + +#ifndef __CLOUD_VERSION__ + if (strcmp(pConn->pAcct->user, "root") == 0) { +#endif + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pDb->numOfVgroups; + cols++; +#ifndef __CLOUD_VERSION__ + } +#endif + +#ifndef __CLOUD_VERSION__ + if (strcmp(pConn->pAcct->user, "root") == 0) { +#endif + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pDb->cfg.replications; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pDb->cfg.daysPerFile; + cols++; +#ifndef __CLOUD_VERSION__ + } +#endif + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + sprintf(pWrite, "%d,%d,%d", pDb->cfg.daysToKeep1, pDb->cfg.daysToKeep2, pDb->cfg.daysToKeep); + cols++; + +#ifndef __CLOUD_VERSION__ + if (strcmp(pConn->pAcct->user, "root") == 0) { +#endif + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pDb->cfg.maxSessions - 1; // table num can be created should minus 1 + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pDb->cfg.rowsInFileBlock; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pDb->cfg.cacheBlockSize; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(float *)pWrite = (pDb->cfg.cacheNumOfBlocks.totalBlocks * 1.0 / (pDb->cfg.maxSessions - 1)); + cols++; + + pWrite = data + 
pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pDb->cfg.blocksPerMeter; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pDb->cfg.commitTime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int8_t *)pWrite = pDb->cfg.commitLog; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int8_t *)pWrite = pDb->cfg.compression; + cols++; +#ifndef __CLOUD_VERSION__ + } +#endif + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + char *prec = (pDb->cfg.precision == TSDB_TIME_PRECISION_MILLI) ? TSDB_TIME_PRECISION_MILLI_STR + : TSDB_TIME_PRECISION_MICRO_STR; + strcpy(pWrite, prec); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, pDb->dropStatus != TSDB_DB_STATUS_READY ? "dropping" : "ready"); + cols++; + + numOfRows++; + } + + pShow->numOfReads += numOfRows; + return numOfRows; +} + +void *mgmtDbActionInsert(void *row, char *str, int size, int *ssize) { + SDbObj *pDb = (SDbObj *)row; + + pDb->pHead = NULL; + pDb->pTail = NULL; + pDb->numOfVgroups = 0; + pDb->numOfTables = 0; + pDb->vgTimer = NULL; + pDb->pMetric = NULL; + mgmtAddDbIntoAcct(&acctObj, pDb); + + return NULL; +} +void *mgmtDbActionDelete(void *row, char *str, int size, int *ssize) { + SDbObj * pDb = (SDbObj *)row; + SAcctObj *pAcct = &acctObj; + mgmtRemoveDbFromAcct(pAcct, pDb); + + return NULL; +} +void *mgmtDbActionUpdate(void *row, char *str, int size, int *ssize) { + return mgmtDbActionReset(row, str, size, ssize); +} +void *mgmtDbActionEncode(void *row, char *str, int size, int *ssize) { + SDbObj *pDb = (SDbObj *)row; + int tsize = pDb->updateEnd - (char *)pDb; + if (size < tsize) { + *ssize = -1; + } else { + memcpy(str, pDb, tsize); + *ssize = tsize; + } + + return NULL; +} +void *mgmtDbActionDecode(void *row, char *str, int size, int *ssize) { + SDbObj *pDb = (SDbObj *)malloc(sizeof(SDbObj)); + if (pDb == NULL) return NULL; + memset(pDb, 0, sizeof(SDbObj)); + + int tsize = pDb->updateEnd - (char *)pDb; + memcpy(pDb, str, tsize); + + return (void *)pDb; +} +void *mgmtDbActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtDbActionBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtDbActionAfterBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtDbActionReset(void *row, char *str, int size, int *ssize) { + SDbObj *pDb = (SDbObj *)row; + int tsize = pDb->updateEnd - (char *)pDb; + memcpy(pDb, str, tsize); + + return NULL; +} +void *mgmtDbActionDestroy(void *row, char *str, int size, int *ssize) { + tfree(row); + return NULL; +} diff --git a/src/system/src/mgmtDnode.c b/src/system/src/mgmtDnode.c new file mode 100644 index 000000000000..a5ef95765ca2 --- /dev/null +++ b/src/system/src/mgmtDnode.c @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE + +#include +#include +#include + +#include "dnodeSystem.h" +#include "mgmt.h" +#include "tschemautil.h" +#include "tstatus.h" + +SDnodeObj dnodeObj; + +void mgmtSetDnodeMaxVnodes(SDnodeObj *pDnode) { + int maxVnodes = pDnode->numOfCores * tsNumOfVnodesPerCore; + maxVnodes = maxVnodes > TSDB_MAX_VNODES ? TSDB_MAX_VNODES : maxVnodes; + maxVnodes = maxVnodes < TSDB_MIN_VNODES ? TSDB_MIN_VNODES : maxVnodes; + if (tsNumOfTotalVnodes != 0) { + maxVnodes = tsNumOfTotalVnodes; + } + if (pDnode->alternativeRole == TSDB_DNODE_ROLE_MGMT) { + maxVnodes = 0; + } + + pDnode->numOfVnodes = maxVnodes; + pDnode->numOfFreeVnodes = maxVnodes; + pDnode->openVnodes = 0; +} + +void mgmtCalcNumOfFreeVnodes(SDnodeObj *pDnode) { + int totalVnodes = 0; + + for (int i = 0; i < pDnode->numOfVnodes; ++i) { + SVnodeLoad *pVload = pDnode->vload + i; + if (pVload->vgId != 0) { + totalVnodes++; + } + } + + pDnode->numOfFreeVnodes = pDnode->numOfVnodes - totalVnodes; +} + +void mgmtSetDnodeVgid(int vnode, int vgId) { + SDnodeObj *pDnode = &dnodeObj; + + SVnodeLoad *pVload = pDnode->vload + vnode; + memset(pVload, 0, sizeof(SVnodeLoad)); + pVload->vnode = vnode; + pVload->vgId = vgId; + mgmtCalcNumOfFreeVnodes(pDnode); +} + +void mgmtUnSetDnodeVgid(int vnode) { + SDnodeObj *pDnode = &dnodeObj; + + SVnodeLoad *pVload = pDnode->vload + vnode; + memset(pVload, 0, sizeof(SVnodeLoad)); + mgmtCalcNumOfFreeVnodes(pDnode); +} + +int mgmtGetDnodeMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "open vnodes"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "free vnodes"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + pShow->numOfRows = 1; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + pShow->pNode = NULL; + + return 0; +} + +int mgmtRetrieveDnodes(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + SDnodeObj *pDnode = &dnodeObj; + char * pWrite; + int cols = 0; + char ipstr[20]; + + if (pShow->numOfReads > 0) return 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pDnode->openVnodes; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pDnode->numOfFreeVnodes; + cols++; + + pShow->numOfReads += 1; + return 1; +} \ No newline at end of file diff --git a/src/system/src/mgmtDnodeInt.c b/src/system/src/mgmtDnodeInt.c new file mode 100644 index 000000000000..af73bc4be86a --- /dev/null +++ b/src/system/src/mgmtDnodeInt.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include +#include + +#include "dnodeSystem.h" +#include "mgmt.h" +#include "tsched.h" +#include "tutil.h" + +int mgmtSendVPeersMsg(SVgObj *pVgroup, SDbObj *pDb); +char *mgmtBuildVpeersIe(char *pMsg, SVgObj *pVgroup, SDbObj *pDb); +char *mgmtBuildCreateMeterIe(STabObj *pMeter, char *pMsg, int vnode); + +void vnodeProcessMsgFromMgmt(SSchedMsg *smsg); +void *rpcQhandle; + +int mgmtSendMsgToDnode(char *msg) { + mTrace("msg:%s is sent to dnode", taosMsg[*msg]); + + SSchedMsg schedMsg; + schedMsg.fp = vnodeProcessMsgFromMgmt; + schedMsg.msg = msg; + schedMsg.ahandle = NULL; + schedMsg.thandle = NULL; + taosScheduleTask(rpcQhandle, &schedMsg); + + return 0; +} + +int mgmtProcessMeterCfgMsg(unsigned char *cont) { + char * pMsg, *pStart; + STabObj * pMeter = NULL; + SMeterCfgMsg *pCfg = (SMeterCfgMsg *)cont; + SVgObj * pVgroup; + SDnodeObj * pObj = &dnodeObj; + + int vnode = htonl(pCfg->vnode); + int sid = htonl(pCfg->sid); + + pStart = (char *)malloc(64000); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_METER_CFG_RSP; + pMsg = pStart + 1; + + if (vnode < pObj->numOfVnodes) { + int vgId = pObj->vload[vnode].vgId; + + pVgroup = mgmtGetVgroup(vgId); + if (pVgroup) pMeter = pVgroup->meterList[sid]; + } + + if (pMeter) { + *pMsg = 0; // code + pMsg++; + pMsg = mgmtBuildCreateMeterIe(pMeter, pMsg, vnode); + } else { + mTrace("vnode:%d sid:%d, meter not there", vnode, sid); + *pMsg = TSDB_CODE_INVALID_METER_ID; + pMsg++; + + *(int32_t *)pMsg = htonl(vnode); + pMsg += sizeof(int32_t); + *(int32_t *)pMsg = htonl(sid); + pMsg += sizeof(int32_t); + } + + mgmtSendMsgToDnode(pStart); + + return 0; +} + +int mgmtProcessVpeerCfgMsg(unsigned char *cont) { + char * pMsg, *pStart; + SVpeerCfgMsg *pCfg = (SVpeerCfgMsg *)cont; + SVgObj * pVgroup = NULL; + SDnodeObj * pObj = &dnodeObj; + + int vnode = htonl(pCfg->vnode); + + pStart = (char *)malloc(256); + if (pStart == NULL) return 0; + + *pStart = TSDB_MSG_TYPE_VPEER_CFG_RSP; + pMsg = pStart + 1; + + if (vnode < pObj->numOfVnodes) pVgroup = mgmtGetVgroup(pObj->vload[vnode].vgId); + + if (pVgroup) { + SDbObj *pDb = mgmtGetDb(pVgroup->dbName); + *pMsg = 0; + pMsg++; + pMsg = mgmtBuildVpeersIe(pMsg, pVgroup, pDb); + } else { + mTrace("vnode:%d, no vgroup info, vgroup:%d", vnode, pObj->vload[vnode].vgId); + *pMsg = TSDB_CODE_INVALID_VALUE; + pMsg++; + *(int32_t *)pMsg = htonl(vnode); + pMsg += sizeof(int32_t); + } + + return 0; +} + +int mgmtProcessCreateRsp(unsigned char *msg) { return 0; } + +int mgmtProcessFreeVnodeRsp(unsigned char *msg) { return 0; } + +int mgmtProcessVPeersRsp(unsigned char *msg) { + STaosRsp *pRsp = (STaosRsp *)msg; + + SDbObj *pDb = mgmtGetDb(pRsp->more); + if (!pDb) { + mError("db not find, code:%d", pRsp->code); + return 0; + } + + if (pDb->vgStatus != TSDB_VG_STATUS_IN_PROGRESS) { + mTrace("db:%s vpeer rsp already disposed, code:%d", pRsp->more, pRsp->code); + return 0; + } + + if (pRsp->code == 0) { + pDb->vgStatus = TSDB_VG_STATUS_READY; + mTrace("db:%s vgroup is created in dnode", pRsp->more); + return 0; + } + + if (pRsp->code == TSDB_CODE_VG_COMMITLOG_INIT_FAILED) { + pDb->vgStatus = TSDB_VG_STATUS_COMMITLOG_INIT_FAILED; + } else { + pDb->vgStatus = TSDB_VG_STATUS_INIT_FAILED; 
+ } + mError("db:%s vgroup create failed, code:%d", pRsp->more, pRsp->code); + + return 0; +} + +void mgmtProcessMsgFromVnode(SSchedMsg *sched) { + char msgType = *sched->msg; + char *content = sched->msg + 1; + + mTrace("msg:%s is received from dnode", taosMsg[msgType]); + + if (msgType == TSDB_MSG_TYPE_METER_CFG) { + mgmtProcessMeterCfgMsg(content); + } else if (msgType == TSDB_MSG_TYPE_VPEER_CFG) { + mgmtProcessVpeerCfgMsg(content); + } else if (msgType == TSDB_MSG_TYPE_CREATE_RSP) { + mgmtProcessCreateRsp(content); + } else if (msgType == TSDB_MSG_TYPE_REMOVE_RSP) { + // do nothing + } else if (msgType == TSDB_MSG_TYPE_VPEERS_RSP) { + mgmtProcessVPeersRsp(content); + } else if (msgType == TSDB_MSG_TYPE_FREE_VNODE_RSP) { + mgmtProcessFreeVnodeRsp(content); + } else if (msgType == TSDB_MSG_TYPE_CFG_PNODE_RSP) { + // do nothing; + } else if (msgType == TSDB_MSG_TYPE_ALTER_STREAM_RSP) { + // do nothing; + } else { + mError("%s from dnode is not processed", taosMsg[msgType]); + } + + free(sched->msg); +} + +char *mgmtBuildCreateMeterIe(STabObj *pMeter, char *pMsg, int vnode) { + SCreateMsg *pCreateMeter; + + pCreateMeter = (SCreateMsg *)pMsg; + pCreateMeter->vnode = htons(vnode); + pCreateMeter->sid = htonl(pMeter->gid.sid); + pCreateMeter->uid = pMeter->uid; + memcpy(pCreateMeter->meterId, pMeter->meterId, TSDB_METER_ID_LEN); + + // pCreateMeter->lastCreate = htobe64(pVgroup->lastCreate); + pCreateMeter->timeStamp = htobe64(pMeter->createdTime); + /* + pCreateMeter->spi = pSec->spi; + pCreateMeter->encrypt = pSec->encrypt; + memcpy(pCreateMeter->cipheringKey, pSec->cipheringKey, TSDB_KEY_LEN); + memcpy(pCreateMeter->secret, pSec->secret, TSDB_KEY_LEN); + */ + pCreateMeter->sversion = htonl(pMeter->sversion); + pCreateMeter->numOfColumns = htons(pMeter->numOfColumns); + SSchema *pSchema = mgmtGetMeterSchema(pMeter); + + for (int i = 0; i < pMeter->numOfColumns; ++i) { + pCreateMeter->schema[i].type = pSchema[i].type; + /* strcpy(pCreateMeter->schema[i].name, pSchema[i].name); */ + pCreateMeter->schema[i].bytes = htons(pSchema[i].bytes); + pCreateMeter->schema[i].colId = htons(pSchema[i].colId); + } + + pMsg = ((char *)(pCreateMeter->schema)) + pMeter->numOfColumns * sizeof(SMColumn); + pCreateMeter->sqlLen = 0; + + if (pMeter->pSql) { + int len = strlen(pMeter->pSql) + 1; + pCreateMeter->sqlLen = htons(len); + strcpy(pMsg, pMeter->pSql); + pMsg += len; + } + + return pMsg; +} + +int mgmtSendCreateMsgToVnode(STabObj *pMeter, int vnode) { + char *pMsg, *pStart; + + pStart = (char *)malloc(64000); + if (pStart == NULL) return -1; + + *pStart = TSDB_MSG_TYPE_CREATE; + pMsg = pStart + 1; + + pMsg = mgmtBuildCreateMeterIe(pMeter, pMsg, vnode); + mgmtSendMsgToDnode(pStart); + + return 0; +} + +int mgmtSendRemoveMeterMsgToVnode(STabObj *pMeter, int vnode) { + SRemoveMeterMsg *pRemove; + char * pMsg, *pStart; + + pStart = (char *)malloc(1+sizeof(SRemoveMeterMsg)); + if (pStart == NULL) return -1; + + *pStart = TSDB_MSG_TYPE_REMOVE; + pMsg = pStart + 1; + + pRemove = (SRemoveMeterMsg *)pMsg; + pRemove->vnode = htons(vnode); + pRemove->sid = htonl(pMeter->gid.sid); + memcpy(pRemove->meterId, pMeter->meterId, TSDB_METER_ID_LEN); + + mgmtSendMsgToDnode(pStart); + mTrace("vid:%d, send remove meter msg, sid:%d", vnode, pMeter->gid.sid); + + return 0; +} + +int mgmtSendAlterStreamMsgToVnode(STabObj *pMeter, int vnode) { + SAlterStreamMsg *pAlter; + char * pMsg, *pStart; + + pStart = (char *)malloc(128); + if (pStart == NULL) return -1; + + *pStart = TSDB_MSG_TYPE_ALTER_STREAM; + pMsg = pStart + 1; + + 
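+  // the one-byte message type written above is followed directly by the SAlterStreamMsg payload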
pAlter = (SAlterStreamMsg *)pMsg; + pAlter->vnode = htons(vnode); + pAlter->sid = htonl(pMeter->gid.sid); + pAlter->uid = pMeter->uid; + pAlter->status = pMeter->status; + + mgmtSendMsgToDnode(pStart); + + return 0; +} + +char *mgmtBuildVpeersIe(char *pMsg, SVgObj *pVgroup, SDbObj *pDb) { + SVPeersMsg *pVPeers = (SVPeersMsg *)pMsg; + + pVPeers->vnode = htonl(pVgroup->vnodeGid[0].vnode); + + pVPeers->cfg = pDb->cfg; + SVnodeCfg *pCfg = &pVPeers->cfg; + pCfg->vgId = htonl(pVgroup->vgId); + pCfg->maxSessions = htonl(pCfg->maxSessions); + pCfg->cacheBlockSize = htonl(pCfg->cacheBlockSize); + pCfg->cacheNumOfBlocks.totalBlocks = htonl(pCfg->cacheNumOfBlocks.totalBlocks); + pCfg->daysPerFile = htonl(pCfg->daysPerFile); + pCfg->daysToKeep1 = htonl(pCfg->daysToKeep1); + pCfg->daysToKeep2 = htonl(pCfg->daysToKeep2); + pCfg->daysToKeep = htonl(pCfg->daysToKeep); + pCfg->commitTime = htonl(pCfg->commitTime); + pCfg->blocksPerMeter = htons(pCfg->blocksPerMeter); + pCfg->replications = 1; + pCfg->rowsInFileBlock = htonl(pCfg->rowsInFileBlock); + + return pMsg; +} + +int mgmtSendVPeersMsg(SVgObj *pVgroup, SDbObj *pDb) { + char *pMsg, *pStart; + + pStart = (char *)malloc(1024); + if (pStart == NULL) return -1; + + *pStart = TSDB_MSG_TYPE_VPEERS; + pMsg = pStart + 1; + + pMsg = mgmtBuildVpeersIe(pMsg, pVgroup, pDb); + + mgmtSendMsgToDnode(pStart); + + return 0; +} + +int mgmtSendFreeVnodeMsg(int vnode) { + SFreeVnodeMsg *pFreeVnode; + char * pMsg, *pStart; + + pStart = (char *)malloc(128); + if (pStart == NULL) return -1; + + *pStart = TSDB_MSG_TYPE_FREE_VNODE; + pMsg = pStart + 1; + + pFreeVnode = (SFreeVnodeMsg *)pMsg; + pFreeVnode->vnode = htons(vnode); + + mgmtSendMsgToDnode(pStart); + + return 0; +} + +int mgmtCfgDynamicOptions(SDnodeObj *pDnode, char *msg) { return 0; } diff --git a/src/system/src/mgmtMeter.c b/src/system/src/mgmtMeter.c new file mode 100644 index 000000000000..921f104d1407 --- /dev/null +++ b/src/system/src/mgmtMeter.c @@ -0,0 +1,1906 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include "mgmt.h" +#include "taosmsg.h" +#include "tast.h" +#include "textbuffer.h" +#include "tschemautil.h" +#include "tscompression.h" +#include "tskiplist.h" +#include "tsqlfunction.h" +#include "ttime.h" +#include "vnodeTagMgmt.h" + +extern int64_t sdbVersion; + +#define mgmtDestroyMeter(pMeter) \ + do { \ + tfree(pMeter->schema); \ + tSkipListDestroy(&(pMeter->pSkipList));\ + tfree(pMeter); \ + } while (0) + +enum _Meter_Update_Action { + METER_UPDATE_TAG_NAME, + METER_UPDATE_TAG_VALUE, + METER_UPDATE_TAG_VALUE_COL0, + METER_UPDATE_NULL, + MAX_METER_UPDATE_ACTION +}; + +typedef struct { + int32_t col; + int32_t pos; + SSchema schema; +} SchemaUnit; + +typedef struct { + char meterId[TSDB_METER_ID_LEN + 1]; + char type; + uint32_t cols; + char data[]; +} SMeterBatchUpdateMsg; + +typedef struct { + char meterId[TSDB_METER_ID_LEN + 1]; + char action; + int32_t dataSize; + char data[]; +} SMeterUpdateMsg; + +void *meterSdb = NULL; +void *(*mgmtMeterActionFp[SDB_MAX_ACTION_TYPES])(void *row, char *str, int size, int *ssize); + +// Function declaration +void *mgmtMeterActionInsert(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionDelete(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionUpdate(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionEncode(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionDecode(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionAfterBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionReset(void *row, char *str, int size, int *ssize); +void *mgmtMeterActionDestroy(void *row, char *str, int size, int *ssize); +int32_t mgmtMeterAddTags(STabObj *pMetric, SSchema schema[], int ntags); +static void removeMeterFromMetricIndex(STabObj *pMetric, STabObj *pMeter); +static void addMeterIntoMetricIndex(STabObj *pMetric, STabObj *pMeter); +int32_t mgmtMeterDropTagByName(STabObj *pMetric, char *name); +int32_t mgmtMeterModifyTagNameByName(STabObj *pMetric, const char *oname, const char *nname); +int32_t mgmtMeterModifyTagValueByName(STabObj *pMeter, char *tagName, char *nContent); +int32_t mgmtMeterAddColumn(STabObj *pMeter, SSchema schema[], int ncols); +int32_t mgmtMeterDropColumnByName(STabObj *pMeter, const char *name); + +void mgmtMeterActionInit() { + mgmtMeterActionFp[SDB_TYPE_INSERT] = mgmtMeterActionInsert; + mgmtMeterActionFp[SDB_TYPE_DELETE] = mgmtMeterActionDelete; + mgmtMeterActionFp[SDB_TYPE_UPDATE] = mgmtMeterActionUpdate; + mgmtMeterActionFp[SDB_TYPE_ENCODE] = mgmtMeterActionEncode; + mgmtMeterActionFp[SDB_TYPE_DECODE] = mgmtMeterActionDecode; + mgmtMeterActionFp[SDB_TYPE_BEFORE_BATCH_UPDATE] = mgmtMeterActionBeforeBatchUpdate; + mgmtMeterActionFp[SDB_TYPE_BATCH_UPDATE] = mgmtMeterActionBatchUpdate; + mgmtMeterActionFp[SDB_TYPE_AFTER_BATCH_UPDATE] = mgmtMeterActionAfterBatchUpdate; + mgmtMeterActionFp[SDB_TYPE_RESET] = mgmtMeterActionReset; + mgmtMeterActionFp[SDB_TYPE_DESTROY] = mgmtMeterActionDestroy; +} + +static int32_t mgmtGetTagsLength(STabObj *pMetric, int32_t col) { // length befor column col + assert(mgmtIsMetric(pMetric) && col >= 0); + + int32_t len = 0; + for (int32_t i = 0; i < pMetric->numOfTags && i < col; ++i) { + len += ((SSchema *)pMetric->schema)[pMetric->numOfColumns + i].bytes; + } + + return len; +} + +static int32_t 
mgmtGetReqTagsLength(STabObj *pMetric, int16_t *cols, int32_t numOfCols) { + assert(mgmtIsMetric(pMetric) && numOfCols >= 0 && numOfCols <= TSDB_MAX_TAGS); + + int32_t len = 0; + for (int32_t i = 0; i < numOfCols; ++i) { + assert(cols[i] < pMetric->numOfTags); + + if (cols[i] == -1) { + len += TSDB_METER_NAME_LEN; + } else { + len += ((SSchema *)pMetric->schema)[pMetric->numOfColumns + cols[i]].bytes; + } + } + + return len; +} + +/* + * remove the hole in result set + */ +static void mgmtVacuumResult(char *data, int32_t numOfCols, int32_t rows, int32_t capacity, SShowObj *pShow) { + if (rows < capacity) { + for (int32_t i = 0; i < numOfCols; ++i) { + memmove(data + pShow->offset[i] * rows, data + pShow->offset[i] * capacity, pShow->bytes[i] * rows); + } + } +} + +static char *mgmtMeterGetTag(STabObj *pMeter, int32_t col, SSchema *pTagColSchema) { + if (!mgmtMeterCreateFromMetric(pMeter)) { + return NULL; + } + + STabObj *pMetric = mgmtGetMeter(pMeter->pTagData); + int32_t offset = mgmtGetTagsLength(pMetric, col) + TSDB_METER_ID_LEN; + assert(offset > 0); + + if (pTagColSchema != NULL) { + *pTagColSchema = ((SSchema *)pMetric->schema)[pMetric->numOfColumns + col]; + } + + return (pMeter->pTagData + offset); +} + +void *mgmtMeterActionReset(void *row, char *str, int size, int *ssize) { + STabObj *pMeter = (STabObj *)row; + int tsize = pMeter->updateEnd - (char *)pMeter; + memcpy(pMeter, str, tsize); + pMeter->schema = (char *)realloc(pMeter->schema, pMeter->schemaSize); + memcpy(pMeter->schema, str + tsize, pMeter->schemaSize); + + if (mgmtMeterCreateFromMetric(pMeter)) { + pMeter->pTagData = pMeter->schema; + } + + return NULL; +} + +void *mgmtMeterActionDestroy(void *row, char *str, int size, int *ssize) { + STabObj *pMeter = (STabObj *)row; + mgmtDestroyMeter(pMeter); + return NULL; +} + +void *mgmtMeterActionInsert(void *row, char *str, int size, int *ssize) { + STabObj * pMeter = NULL; + SVgObj * pVgroup = NULL; + SDbObj * pDb = NULL; + STabObj * pMetric = NULL; + SAcctObj *pAcct = NULL; + + pMeter = (STabObj *)row; + + if (mgmtIsNormalMeter(pMeter)) { + pVgroup = mgmtGetVgroup(pMeter->gid.vgId); + if (pVgroup == NULL) { + mError("id:%s not in vgroup:%d", pMeter->meterId, pMeter->gid.vgId); + return NULL; + } + + pDb = mgmtGetDb(pVgroup->dbName); + if (pDb == NULL) { + mError("vgroup:%d not in DB:%s", pVgroup->vgId, pVgroup->dbName); + return NULL; + } + + pAcct = &acctObj; + } + + if (mgmtMeterCreateFromMetric(pMeter)) { + pMeter->pTagData = (char *)pMeter->schema; + pMetric = mgmtGetMeter(pMeter->pTagData); + assert(pMetric != NULL); + } + + if (pMeter->meterType == TSDB_METER_STABLE) { + pMeter->pSql = (char *)pMeter->schema + sizeof(SSchema) * pMeter->numOfColumns; + } + + if (mgmtIsNormalMeter(pMeter)) { + if (pMetric) { + mgmtAddMeterIntoMetric(pMetric, pMeter); + } + + pAcct->acctInfo.numOfTimeSeries += (pMeter->numOfColumns - 1); + pVgroup->numOfMeters++; + pDb->numOfTables++; + pVgroup->meterList[pMeter->gid.sid] = pMeter; + + if (pVgroup->numOfMeters >= pDb->cfg.maxSessions - 1 && pDb->numOfVgroups > 1) { + mgmtMoveVgroupToTail(pDb, pVgroup); + } + } else { + // insert a metric + pMeter->pHead = NULL; + pMeter->pSkipList = NULL; + pDb = mgmtGetDbByMeterId(pMeter->meterId); + if (pDb) { + mgmtAddMetricIntoDb(pDb, pMeter); + } + } + + return NULL; +} + +void *mgmtMeterActionDelete(void *row, char *str, int size, int *ssize) { + STabObj *pMeter = NULL; + SVgObj * pVgroup = NULL; + SDbObj * pDb = NULL; + STabObj *pMetric = NULL; + + pMeter = (STabObj *)row; + + if 
(mgmtIsNormalMeter(pMeter)) { + pVgroup = mgmtGetVgroup(pMeter->gid.vgId); + if (pVgroup == NULL) { + mError("id:%s not in vgroup:%d", pMeter->meterId, pMeter->gid.vgId); + return NULL; + } + + pDb = mgmtGetDb(pVgroup->dbName); + if (pDb == NULL) { + mError("vgroup:%d not in DB:%s", pVgroup->vgId, pVgroup->dbName); + return NULL; + } + } + + if (mgmtMeterCreateFromMetric(pMeter)) { + pMeter->pTagData = (char *)pMeter->schema; + pMetric = mgmtGetMeter(pMeter->pTagData); + assert(pMetric != NULL); + } + + if (mgmtIsNormalMeter(pMeter)) { + if (pMetric) mgmtRemoveMeterFromMetric(pMetric, pMeter); + + pVgroup->meterList[pMeter->gid.sid] = NULL; + pVgroup->numOfMeters--; + pDb->numOfTables--; + taosFreeId(pVgroup->idPool, pMeter->gid.sid); + + if (pVgroup->numOfMeters > 0) mgmtMoveVgroupToHead(pDb, pVgroup); + } else { + // remove a metric + // remove all the associated meters + + pDb = mgmtGetDbByMeterId(pMeter->meterId); + if (pDb) mgmtRemoveMetricFromDb(pDb, pMeter); + } + + return NULL; +} + +void *mgmtMeterActionUpdate(void *row, char *str, int size, int *ssize) { + STabObj *pMeter = NULL; + STabObj *pMetric = NULL; + + pMeter = (STabObj *)row; + STabObj *pNew = (STabObj *)str; + + if (pNew->isDirty) { + pMetric = mgmtGetMeter(pMeter->pTagData); + removeMeterFromMetricIndex(pMetric, pMeter); + } + mgmtMeterActionReset(pMeter, str, size, NULL); + pMeter->pTagData = pMeter->schema; + if (pNew->isDirty) { + addMeterIntoMetricIndex(pMetric, pMeter); + pMeter->isDirty = 0; + } + + return NULL; +} + +void *mgmtMeterActionEncode(void *row, char *str, int size, int *ssize) { + assert(row != NULL && str != NULL); + + STabObj *pMeter = (STabObj *)row; + int tsize = pMeter->updateEnd - (char *)pMeter; + + if (size < tsize + pMeter->schemaSize + 1) { + *ssize = -1; + return NULL; + } + + memcpy(str, pMeter, tsize); + memcpy(str + tsize, pMeter->schema, pMeter->schemaSize); + + *ssize = tsize + pMeter->schemaSize; + + return NULL; +} + +void *mgmtMeterActionDecode(void *row, char *str, int size, int *ssize) { + assert(str != NULL); + + STabObj *pMeter = (STabObj *)malloc(sizeof(STabObj)); + if (pMeter == NULL) return NULL; + memset(pMeter, 0, sizeof(STabObj)); + + int tsize = pMeter->updateEnd - (char *)pMeter; + if (size < tsize) { + mgmtDestroyMeter(pMeter); + return NULL; + } + memcpy(pMeter, str, tsize); + + pMeter->schema = (char *)malloc(pMeter->schemaSize); + if (pMeter->schema == NULL) { + mgmtDestroyMeter(pMeter); + return NULL; + } + + memcpy(pMeter->schema, str + tsize, pMeter->schemaSize); + + return (void *)pMeter; +} + +void *mgmtMeterActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize) { + STabObj *pMetric = (STabObj *)row; + + pthread_rwlock_wrlock(&(pMetric->rwLock)); + + return NULL; +} + +void *mgmtMeterActionBatchUpdate(void *row, char *str, int size, int *ssize) { + STabObj * pMeter = (STabObj *)row; + SMeterBatchUpdateMsg *msg = (SMeterBatchUpdateMsg *)str; + + if (mgmtIsMetric(pMeter)) { + if (msg->type == SDB_TYPE_INSERT) { // Insert schema + uint32_t total_cols = pMeter->numOfColumns + pMeter->numOfTags; + pMeter->schema = realloc(pMeter->schema, (total_cols + msg->cols) * sizeof(SSchema)); + pMeter->schemaSize = (total_cols + msg->cols) * sizeof(SSchema); + pMeter->numOfTags += msg->cols; + memcpy(pMeter->schema + total_cols * sizeof(SSchema), msg->data, msg->cols * sizeof(SSchema)); + + } else if (msg->type == SDB_TYPE_DELETE) { // Delete schema + // Make sure the order of tag columns + SchemaUnit *schemaUnit = (SchemaUnit *)(msg->data); + int col = 
schemaUnit->col; + assert(col > 0 && col < pMeter->numOfTags); + if (col < pMeter->numOfTags - 1) { + memmove(pMeter->schema + sizeof(SSchema) * (pMeter->numOfColumns + col), + pMeter->schema + sizeof(SSchema) * (pMeter->numOfColumns + col + 1), + pMeter->schemaSize - (sizeof(SSchema) * (pMeter->numOfColumns + col + 1))); + } + pMeter->schemaSize -= sizeof(SSchema); + pMeter->numOfTags--; + pMeter->schema = realloc(pMeter->schema, pMeter->schemaSize); + } + + return pMeter->pHead; + + } else if (mgmtMeterCreateFromMetric(pMeter)) { + if (msg->type == SDB_TYPE_INSERT) { + SSchema *schemas = (SSchema *)msg->data; + int total_size = 0; + for (int i = 0; i < msg->cols; i++) { + total_size += schemas[i].bytes; + } + pMeter->schema = realloc(pMeter->schema, pMeter->schemaSize + total_size); + pMeter->pTagData = pMeter->schema; + memset(pMeter->schema + pMeter->schemaSize, 0, total_size); + pMeter->schemaSize += total_size; + // TODO: set the data as default value + } else if (msg->type == SDB_TYPE_DELETE) { // Delete values in MTABLEs + SchemaUnit *schemaUnit = (SchemaUnit *)(msg->data); + int32_t pos = schemaUnit->pos; + int32_t bytes = schemaUnit->schema.bytes; + assert(pos + bytes <= pMeter->schemaSize); + + if (pos + bytes != pMeter->schemaSize) { + memmove(pMeter->schema + pos, pMeter->schema + pos + bytes, pMeter->schemaSize - (pos + bytes)); + } + + pMeter->schemaSize -= bytes; + pMeter->schema = realloc(pMeter->schema, pMeter->schemaSize); + } + + return pMeter->next; + } + + return NULL; +} + +void *mgmtMeterActionAfterBatchUpdate(void *row, char *str, int size, int *ssize) { + STabObj *pMetric = (STabObj *)row; + + pthread_rwlock_unlock(&(pMetric->rwLock)); + + return NULL; +} + +void *mgmtMeterAction(char action, void *row, char *str, int size, int *ssize) { + if (mgmtMeterActionFp[action] != NULL) { + return (*(mgmtMeterActionFp[action]))(row, str, size, ssize); + } + return NULL; +} + +void mgmtAddMeterStatisticToAcct(STabObj *pMeter, SAcctObj *pAcct) { + pAcct->acctInfo.numOfTimeSeries += (pMeter->numOfColumns - 1); +} + +int mgmtInitMeters() { + void * pNode = NULL; + SVgObj * pVgroup = NULL; + STabObj * pMeter = NULL; + STabObj * pMetric = NULL; + SDbObj * pDb = NULL; + SAcctObj *pAcct = NULL; + + // TODO: Make sure this function only run once + mgmtMeterActionInit(); + + meterSdb = sdbOpenTable(tsMaxTables, sizeof(STabObj) + sizeof(SSchema) * TSDB_MAX_COLUMNS + TSDB_MAX_SQL_LEN, + "meters", SDB_KEYTYPE_STRING, mgmtDirectory, mgmtMeterAction); + if (meterSdb == NULL) { + mError("failed to init meter data"); + return -1; + } + + pNode = NULL; + while (1) { + pNode = sdbFetchRow(meterSdb, pNode, (void **)&pMeter); + if (pMeter == NULL) break; + if (mgmtIsMetric(pMeter)) pMeter->numOfMeters = 0; + } + + pNode = NULL; + while (1) { + pNode = sdbFetchRow(meterSdb, pNode, (void **)&pMeter); + if (pMeter == NULL) break; + + pDb = mgmtGetDbByMeterId(pMeter->meterId); + if (pDb == NULL) { + mError("failed to get db: %s", pMeter->meterId); + continue; + } + + if (mgmtIsNormalMeter(pMeter)) { + pVgroup = mgmtGetVgroup(pMeter->gid.vgId); + if (pVgroup == NULL || pVgroup->meterList == NULL) { + mError("failed to get vgroup:%i", pMeter->gid.vgId); + continue; + } + pVgroup->meterList[pMeter->gid.sid] = pMeter; + taosIdPoolMarkStatus(pVgroup->idPool, pMeter->gid.sid, 1); + + if (pMeter->meterType == TSDB_METER_STABLE) { + pMeter->pSql = (char *)pMeter->schema + sizeof(SSchema) * pMeter->numOfColumns; + } + + if (mgmtMeterCreateFromMetric(pMeter)) { + pMeter->pTagData = (char *)pMeter->schema; 
// + sizeof(SSchema)*pMeter->numOfColumns; + pMetric = mgmtGetMeter(pMeter->pTagData); + if (pMetric) { + mgmtAddMeterIntoMetric(pMetric, pMeter); + } + } + + pAcct = &acctObj; + mgmtAddMeterStatisticToAcct(pMeter, pAcct); + } else { + if (pDb) mgmtAddMetricIntoDb(pDb, pMeter); + } + } + + mgmtSetVgroupIdPool(); + + mTrace("meter is initialized"); + return 0; +} + +STabObj *mgmtGetMeter(char *meterId) { return (STabObj *)sdbGetRow(meterSdb, meterId); } + +int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { + STabObj * pMeter = NULL; + STabObj * pMetric = NULL; + SVgObj * pVgroup = NULL; + int size = 0; + SAcctObj *pAcct = NULL; + + int numOfTables = sdbGetNumOfRows(meterSdb); + if (numOfTables >= tsMaxTables) { + mWarn("numOfTables:%d, exceed tsMaxTables:%d", numOfTables, tsMaxTables); + return TSDB_CODE_TOO_MANY_TABLES; + } + + // does table exist? + pMeter = mgmtGetMeter(pCreate->meterId); + if (pMeter) { + if (pCreate->igExists) { + return TSDB_CODE_SUCCESS; + } else { + return TSDB_CODE_TABLE_ALREADY_EXIST; + } + } + + // Create the table object + pMeter = (STabObj *)malloc(sizeof(STabObj)); + if (pMeter == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY; + memset(pMeter, 0, sizeof(STabObj)); + + if (pCreate->numOfColumns == 0 && pCreate->numOfTags == 0) { // MTABLE + pMeter->meterType = TSDB_METER_MTABLE; + char *pTagData = (char *)pCreate->schema; // it is a tag key + pMetric = mgmtGetMeter(pTagData); + if (pMetric == NULL) { + return TSDB_CODE_INVALID_TABLE; + } + + /* + * for meters created according to metrics, the schema of this meter isn't + * needed. + * so, we don't allocate memory for it in order to save a huge amount of + * memory when a large amount of meters are created using metrics. + */ + size = mgmtGetTagsLength(pMetric, INT_MAX) + (uint32_t)TSDB_METER_ID_LEN; + pMeter->schema = (char *)malloc(size); + if (pMeter->schema == NULL) { + mgmtDestroyMeter(pMeter); + return TSDB_CODE_INVALID_TABLE; + } + memset(pMeter->schema, 0, size); + + pMeter->schemaSize = size; + + pMeter->numOfColumns = pMetric->numOfColumns; + pMeter->sversion = pMetric->sversion; + pMeter->pTagData = pMeter->schema; // + pMetric->numOfColumns*sizeof(SSchema); + pMeter->nextColId = pMetric->nextColId; + memcpy(pMeter->pTagData, pTagData, size); + + } else { + int numOfCols = pCreate->numOfColumns + pCreate->numOfTags; + size = numOfCols * sizeof(SSchema) + pCreate->sqlLen; + pMeter->schema = (char *)malloc(size); + if (pMeter->schema == NULL) { + mgmtDestroyMeter(pMeter); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + memset(pMeter->schema, 0, size); + + pMeter->numOfColumns = pCreate->numOfColumns; + pMeter->sversion = 0; + pMeter->numOfTags = pCreate->numOfTags; + pMeter->schemaSize = size; + memcpy(pMeter->schema, pCreate->schema, numOfCols * sizeof(SSchema)); + + for (int k = 0; k < pCreate->numOfColumns; k++) { + SSchema *tschema = (SSchema *)pMeter->schema; + tschema[k].colId = pMeter->nextColId++; + } + + if (pCreate->sqlLen > 0) { + pMeter->meterType = TSDB_METER_STABLE; + pMeter->pSql = pMeter->schema + numOfCols * sizeof(SSchema); + memcpy(pMeter->pSql, (char *)(pCreate->schema) + numOfCols * sizeof(SSchema), pCreate->sqlLen); + pMeter->pSql[pCreate->sqlLen - 1] = 0; + mTrace("stream sql len:%d, sql:%s", pCreate->sqlLen, pMeter->pSql); + } else { + if (pCreate->numOfTags > 0) { + pMeter->meterType = TSDB_METER_METRIC; + } else { + pMeter->meterType = TSDB_METER_OTABLE; + } + } + } + + pMeter->createdTime = taosGetTimestampMs(); + strcpy(pMeter->meterId, pCreate->meterId); + if 
(pthread_rwlock_init(&pMeter->rwLock, NULL)) { + mError("Failed to init meter lock"); + mgmtDestroyMeter(pMeter); + return TSDB_CODE_OTHERS; + } + + if (pCreate->numOfTags == 0) { // handle normal meter creation + pVgroup = pDb->pHead; + + if (pDb->vgStatus == TSDB_VG_STATUS_IN_PROGRESS) { + mgmtDestroyMeter(pMeter); + return TSDB_CODE_ACTION_IN_PROGRESS; + } + + if (pDb->vgStatus == TSDB_VG_STATUS_FULL) { + mgmtDestroyMeter(pMeter); + return TSDB_CODE_NO_ENOUGH_PNODES; + } + + if (pDb->vgStatus == TSDB_VG_STATUS_COMMITLOG_INIT_FAILED) { + mgmtDestroyMeter(pMeter); + return TSDB_CODE_VG_COMMITLOG_INIT_FAILED; + } + + if (pDb->vgStatus == TSDB_VG_STATUS_INIT_FAILED) { + mgmtDestroyMeter(pMeter); + return TSDB_CODE_VG_INIT_FAILED; + } + + if (pVgroup == NULL) { + pDb->vgStatus = TSDB_VG_STATUS_IN_PROGRESS; + mgmtCreateVgroup(pDb); + mgmtDestroyMeter(pMeter); + return TSDB_CODE_ACTION_IN_PROGRESS; + } + + int sid = taosAllocateId(pVgroup->idPool); + if (sid < 0) { + mWarn("db:%s, vgroup:%d, run out of ID, num:%d", pDb->name, pVgroup->vgId, taosIdPoolNumOfUsed(pVgroup->idPool)); + pDb->vgStatus = TSDB_VG_STATUS_IN_PROGRESS; + mgmtCreateVgroup(pDb); + mgmtDestroyMeter(pMeter); + return TSDB_CODE_ACTION_IN_PROGRESS; + } + + pMeter->gid.sid = sid; + pMeter->gid.vgId = pVgroup->vgId; + pMeter->uid = (((uint64_t)pMeter->gid.vgId) << 40) + ((((uint64_t)pMeter->gid.sid) & ((1ul << 24) - 1ul)) << 16) + + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); + } else { + pMeter->uid = (((uint64_t)pMeter->createdTime) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); + } + + if (sdbInsertRow(meterSdb, pMeter, 0) < 0) { + return TSDB_CODE_SDB_ERROR; + } + + // send create message to the selected vnode servers + if (pCreate->numOfTags == 0) { + mgmtSendCreateMsgToVnode(pMeter, pVgroup->vnodeGid[0].vnode); + } + + return 0; +} + +int mgmtDropMeter(SDbObj *pDb, char *meterId, int ignore) { + STabObj * pMeter; + SVgObj * pVgroup; + SAcctObj *pAcct; + + pMeter = mgmtGetMeter(meterId); + if (pMeter == NULL) { + if (ignore) { + return TSDB_CODE_SUCCESS; + } else { + return TSDB_CODE_INVALID_TABLE; + } + } + + pAcct = &acctObj; + + // 0.sys + if (taosCheckDbName(pDb->name, tsMonitorDbName)) return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + + if (mgmtIsNormalMeter(pMeter)) { + if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pMeter->numOfColumns - 1); + pVgroup = mgmtGetVgroup(pMeter->gid.vgId); + if (pVgroup == NULL) { + return TSDB_CODE_OTHERS; + } + + mgmtSendRemoveMeterMsgToVnode(pMeter, pVgroup->vnodeGid[0].vnode); + sdbDeleteRow(meterSdb, pMeter); + + if (pVgroup->numOfMeters <= 0) mgmtDropVgroup(pDb, pVgroup); + } else { + // remove a metric + if (pMeter->numOfMeters > 0) { + assert(pMeter->pSkipList != NULL && pMeter->pSkipList->nSize > 0); + return TSDB_CODE_RELATED_TABLES_EXIST; + } + sdbDeleteRow(meterSdb, pMeter); + } + + return 0; +} + +int mgmtAlterMeter(SDbObj *pDb, SAlterTableMsg *pAlter) { + STabObj *pMeter; + + pMeter = mgmtGetMeter(pAlter->meterId); + if (pMeter == NULL) { + return TSDB_CODE_INVALID_TABLE; + } + + // 0.sys + if (taosCheckDbName(pDb->name, tsMonitorDbName)) { + return TSDB_CODE_MONITOR_DB_FORBEIDDEN; + } + + if (pAlter->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { + if (!mgmtIsNormalMeter(pMeter) || !mgmtMeterCreateFromMetric(pMeter)) { + return TSDB_CODE_OPS_NOT_SUPPORT; + } + } + + // todo add + /* mgmtMeterAddTags */ + if (pAlter->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { + mTrace("alter table %s to add tag column:%s, type:%d", pMeter->meterId, pAlter->schema[0].name, + 
pAlter->schema[0].type); + return mgmtMeterAddTags(pMeter, pAlter->schema, 1); + } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) { + mTrace("alter table %s to drop tag column:%s", pMeter->meterId, pAlter->schema[0].name); + return mgmtMeterDropTagByName(pMeter, pAlter->schema[0].name); + } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { + mTrace("alter table %s to change tag column name, old: %s, new: %s", pMeter->meterId, pAlter->schema[0].name, + pAlter->schema[1].name); + return mgmtMeterModifyTagNameByName(pMeter, pAlter->schema[0].name, pAlter->schema[1].name); + } else if (pAlter->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) { + mTrace("alter table %s to modify tag value, tag name:%s", pMeter->meterId, pAlter->schema[0].name); + return mgmtMeterModifyTagValueByName(pMeter, pAlter->schema[0].name, pAlter->tagVal); + } else if (pAlter->type == TSDB_ALTER_TABLE_ADD_COLUMN) { + mTrace("alter table %s to add column:%s, type:%d", pMeter->meterId, pAlter->schema[0].name, pAlter->schema[0].type); + return mgmtMeterAddColumn(pMeter, pAlter->schema, 1); + } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) { + mTrace("alter table %s to drop column:%s", pMeter->meterId, pAlter->schema[0].name); + return mgmtMeterDropColumnByName(pMeter, pAlter->schema[0].name); + } else { + return TSDB_CODE_INVALID_MSG_TYPE; + } + + return TSDB_CODE_SUCCESS; +} + +/* + * create key of each meter for skip list, which is generated from first tag + * column + */ +static void createKeyFromTagValue(STabObj *pMetric, STabObj *pMeter, tSkipListKey *pKey) { + SSchema * pTagSchema = (SSchema *)(pMetric->schema + pMetric->numOfColumns * sizeof(SSchema)); + const int16_t KEY_COLUMN_OF_TAGS = 0; + + char *tagVal = pMeter->pTagData + TSDB_METER_ID_LEN; // tag start position + *pKey = tSkipListCreateKey(pTagSchema[KEY_COLUMN_OF_TAGS].type, tagVal, pTagSchema[KEY_COLUMN_OF_TAGS].bytes); +} + +/* + * add a meter into a metric's skip list + */ +static void addMeterIntoMetricIndex(STabObj *pMetric, STabObj *pMeter) { + const int16_t KEY_COLUMN_OF_TAGS = 0; + SSchema * pTagSchema = (SSchema *)(pMetric->schema + pMetric->numOfColumns * sizeof(SSchema)); + + if (pMetric->pSkipList == NULL) { + tSkipListCreate(&pMetric->pSkipList, MAX_SKIP_LIST_LEVEL, pTagSchema[KEY_COLUMN_OF_TAGS].type, + pTagSchema[KEY_COLUMN_OF_TAGS].bytes, tSkipListDefaultCompare); + } + + if (pMetric->pSkipList) { + tSkipListKey key = {0}; + createKeyFromTagValue(pMetric, pMeter, &key); + + tSkipListPut(pMetric->pSkipList, pMeter, &key, 1); + + tSkipListDestroyKey(&key); + } +} + +static void removeMeterFromMetricIndex(STabObj *pMetric, STabObj *pMeter) { + if (pMetric->pSkipList == NULL) { + return; + } + + tSkipListKey key = {0}; + createKeyFromTagValue(pMetric, pMeter, &key); + tSkipListNode **pRes = NULL; + + int32_t num = tSkipListGets(pMetric->pSkipList, &key, &pRes); + for (int32_t i = 0; i < num; ++i) { + STabObj *pOneMeter = (STabObj *)pRes[i]->pData; + if (pOneMeter->gid.sid == pMeter->gid.sid && pOneMeter->gid.vgId == pMeter->gid.vgId) { + assert(pMeter == pOneMeter); + tSkipListRemoveNode(pMetric->pSkipList, pRes[i]); + } + } + + if (num != 0) { + free(pRes); + } +} + +int mgmtAddMeterIntoMetric(STabObj *pMetric, STabObj *pMeter) { + if (pMeter == NULL || pMetric == NULL) return -1; + + pthread_rwlock_wrlock(&(pMetric->rwLock)); + // add meter into skip list + pMeter->next = pMetric->pHead; + pMeter->prev = NULL; + + if (pMetric->pHead) pMetric->pHead->prev = pMeter; + + pMetric->pHead = pMeter; + pMetric->numOfMeters++; 
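+  // besides the metric's linked list, register the meter in the skip-list index keyed by its first tag column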
+ + addMeterIntoMetricIndex(pMetric, pMeter); + + pthread_rwlock_unlock(&(pMetric->rwLock)); + + return 0; +} + +int mgmtRemoveMeterFromMetric(STabObj *pMetric, STabObj *pMeter) { + pthread_rwlock_wrlock(&(pMetric->rwLock)); + + if (pMeter->prev) pMeter->prev->next = pMeter->next; + + if (pMeter->next) pMeter->next->prev = pMeter->prev; + + if (pMeter->prev == NULL) pMetric->pHead = pMeter->next; + + pMetric->numOfMeters--; + + removeMeterFromMetricIndex(pMetric, pMeter); + + pthread_rwlock_unlock(&(pMetric->rwLock)); + + return 0; +} + +void mgmtCleanUpMeters() { sdbCloseTable(meterSdb); } + +int mgmtGetMeterMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + if (pConn->pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; + + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = TSDB_METER_NAME_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "table_name"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "created time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "columns"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_METER_NAME_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "stable"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + // pShow->numOfRows = sdbGetNumOfRows (meterSdb); + pShow->numOfRows = pConn->pDb->numOfTables; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + return 0; +} + +static int32_t tabObjVGIDComparator(const void *pLeft, const void *pRight) { + STabObj *p1 = *(STabObj **)pLeft; + STabObj *p2 = *(STabObj **)pRight; + + int32_t ret = p1->gid.vgId - p2->gid.vgId; + if (ret == 0) { + return ret; + } else { + return ret > 0 ? 1 : -1; + } +} + +/* + * qsort comparator + * sort the result to ensure meters with the same gid is grouped together + */ +static int32_t nodeVGIDComparator(const void *pLeft, const void *pRight) { + tSkipListNode *p1 = *((tSkipListNode **)pLeft); + tSkipListNode *p2 = *((tSkipListNode **)pRight); + + return tabObjVGIDComparator(&p1->pData, &p2->pData); +} + +SSchema *mgmtGetMeterSchema(STabObj *pMeter) { + if (pMeter == NULL) { + return NULL; + } + + if (!mgmtMeterCreateFromMetric(pMeter)) { + return (SSchema *)pMeter->schema; + } + + STabObj *pMetric = mgmtGetMeter(pMeter->pTagData); + assert(pMetric != NULL); + + return (SSchema *)pMetric->schema; +} + +/* + * serialize SVnodeSidList to byte array + */ +static char *mgmtBuildMetricMetaMsg(STabObj *pMeter, int32_t *ovgId, SVnodeSidList **pList, SMetricMeta *pMeta, + int32_t tagLen, int16_t numOfTags, int16_t *tagsId, int32_t maxNumOfMeters, + char *pMsg) { + if (pMeter->gid.vgId != *ovgId || ((*pList) != NULL && (*pList)->numOfSids >= maxNumOfMeters)) { + /* + * here we construct a new vnode group for 2 reasons + * 1. the query msg may be larger than 64k, + * 2. 
the following meters belong to different vnodes + */ + + (*pList) = (SVnodeSidList *)pMsg; + (*pList)->numOfSids = 0; + (*pList)->index = 0; + pMeta->numOfVnodes++; + + SVgObj *pVgroup = mgmtGetVgroup(pMeter->gid.vgId); + (*pList)->vpeerDesc[0].vnode = pVgroup->vnodeGid[0].vnode; + + pMsg += sizeof(SVnodeSidList); + (*ovgId) = pMeter->gid.vgId; + } + pMeta->numOfMeters++; + (*pList)->numOfSids++; + + SMeterSidExtInfo *pSMeterTagInfo = (SMeterSidExtInfo *)pMsg; + pSMeterTagInfo->sid = pMeter->gid.sid; + pMsg += sizeof(SMeterSidExtInfo); + + int32_t offset = 0; + for (int32_t j = 0; j < numOfTags; ++j) { + if (tagsId[j] == -1) { + char name[TSDB_METER_NAME_LEN] = {0}; + extractMeterName(pMeter->meterId, name); + + memcpy(pMsg + offset, name, TSDB_METER_NAME_LEN); + offset += TSDB_METER_NAME_LEN; + } else { + SSchema s = {0}; + char * tag = mgmtMeterGetTag(pMeter, tagsId[j], &s); + + memcpy(pMsg + offset, tag, (size_t)s.bytes); + offset += s.bytes; + } + } + + pMsg += offset; + assert(offset == tagLen); + + return pMsg; +} + +static STabObj *mgmtGetResultPayload(tQueryResultset *pRes, int32_t index) { + if (index < 0 || index >= pRes->num) { + return NULL; + } + + if (pRes->nodeType == TAST_NODE_TYPE_INDEX_ENTRY) { + return (STabObj *)((tSkipListNode *)pRes->pRes[index])->pData; + } else { + return (STabObj *)pRes->pRes[index]; + } +} + +// get total number of vnodes in final result set +static int32_t mgmtGetNumOfVnodesInResult(tQueryResultset *pResult) { + int32_t numOfVnodes = 0; + int32_t prevGid = -1; + + for (int32_t i = 0; i < pResult->num; ++i) { + STabObj *pMeter = mgmtGetResultPayload(pResult, i); + if (prevGid == -1) { + prevGid = pMeter->gid.vgId; + numOfVnodes++; + } else if (prevGid != pMeter->gid.vgId) { + prevGid = pMeter->gid.vgId; + numOfVnodes++; + } + } + + return numOfVnodes; +} + +static void mgmtRetrieveMetersFromIDs(tQueryResultset *pRes, char *queryStr, char *origins, STabObj *pMetric) { + char * sep = ","; + char * pToken = NULL; + int32_t s = 4; + + pRes->pRes = malloc(sizeof(char *) * s); + pRes->nodeType = TAST_NODE_TYPE_METER_PTR; + pRes->num = 0; + + for (pToken = strsep(&queryStr, sep); pToken != NULL; pToken = strsep(&queryStr, sep)) { + STabObj *pMeterObj = mgmtGetMeter(pToken); + if (pMeterObj == NULL) { + mWarn("metric:%s error in metric query expression:%s, invalid meter id:%s", pMetric->meterId, origins, pToken); + continue; + } else { + /* double the old size */ + if (pRes->num >= s) { + s *= 2; + pRes->pRes = realloc(pRes->pRes, sizeof(char *) * s); + } + + /* not a table created from metric, ignore */ + if (pMeterObj->meterType != TSDB_METER_MTABLE) { + continue; + } + + /* queried meter not belongs to this metric, ignore */ + if (mgmtGetMeter(pMeterObj->pTagData)->uid != pMetric->uid) { + continue; + } + + pRes->pRes[pRes->num++] = pMeterObj; + } + } +} + +static int32_t tabObjResultComparator(const void *p1, const void *p2, void *param) { + tOrderDescriptor *pOrderDesc = (tOrderDescriptor *)param; + + STabObj *pNode1 = (STabObj *)p1; + STabObj *pNode2 = (STabObj *)p2; + + for (int32_t i = 0; i < pOrderDesc->orderIdx.numOfOrderedCols; ++i) { + int32_t colIdx = pOrderDesc->orderIdx.pData[i]; + + char *f1 = NULL; + char *f2 = NULL; + + SSchema schema = {0}; + + if (colIdx == -1) { + f1 = pNode1->meterId; + f2 = pNode2->meterId; + schema.type = TSDB_DATA_TYPE_BINARY; + schema.bytes = TSDB_METER_ID_LEN; + } else { + f1 = mgmtMeterGetTag(pNode1, colIdx, NULL); + f2 = mgmtMeterGetTag(pNode2, colIdx, &schema); + assert(schema.type == 
pOrderDesc->pTagSchema->pSchema[colIdx].type); + } + + int32_t ret = doCompare(f1, f2, schema.type, schema.bytes); + if (ret == 0) { + continue; + } else { + return ret; + } + } + + return 0; +} + +static int32_t nodeResultComparator(const void *p1, const void *p2, void *param) { + STabObj *pNode1 = (STabObj *)((tSkipListNode *)p1)->pData; + STabObj *pNode2 = (STabObj *)((tSkipListNode *)p2)->pData; + + return tabObjResultComparator(pNode1, pNode2, param); +} + +// todo merge sort function with losertree used +static void mgmtReorganizeMetersInMetricMeta(STabObj *pMetric, SMetricMetaMsg *pInfo, SSchema *pTagSchema, + tQueryResultset *pRes) { + /* no result, no need to pagination */ + if (pRes->num <= 0) { + return; + } + + /* + * To apply the group limitation and group offset, we should sort the result + * list according to the + * order condition + */ + tOrderDescriptor *descriptor = + (tOrderDescriptor *)calloc(1, sizeof(tOrderDescriptor) + sizeof(int32_t) * pInfo->numOfGroupbyCols); + descriptor->pTagSchema = tCreateTagSchema(pTagSchema, pMetric->numOfTags); + descriptor->orderIdx.numOfOrderedCols = pInfo->numOfGroupbyCols; + + int32_t *startPos = NULL; + int32_t numOfSubset = 1; + + if (pInfo->numOfGroupbyCols > 0) { + memcpy(descriptor->orderIdx.pData, (int16_t *)pInfo->groupbyTagIds, sizeof(int16_t) * pInfo->numOfGroupbyCols); + // sort results list + __ext_compar_fn_t comparFn = + (pRes->nodeType == TAST_NODE_TYPE_METER_PTR) ? tabObjResultComparator : nodeResultComparator; + + tQSortEx(pRes->pRes, POINTER_BYTES, 0, pRes->num - 1, descriptor, comparFn); + startPos = calculateSubGroup(pRes->pRes, pRes->num, &numOfSubset, descriptor, comparFn); + } else { + startPos = malloc(2 * sizeof(int32_t)); + + startPos[0] = 0; + startPos[1] = (int32_t)pRes->num; + } + + /* if pInfo->limit == 0, the query will be intercepted by sdk, and wont be + * sent to mnode */ + assert(pInfo->limit == -1 || pInfo->limit > 0); + + int32_t numOfTotal = 0; + if (pInfo->offset >= numOfSubset) { + numOfTotal = 0; + } else if (numOfSubset == 1) { + // no 'groupBy' clause, all tables returned + numOfTotal = pRes->num; + } else { + /* there is a offset value of group */ + int32_t start = 0; + int32_t end = 0; + + if (pInfo->orderType == TSQL_SO_ASC) { + start = startPos[pInfo->offset]; + + if (pInfo->limit + pInfo->offset >= numOfSubset || pInfo->limit == -1) { + /* all results are required */ + end = startPos[numOfSubset]; + } else { + end = startPos[pInfo->limit + pInfo->offset]; + } + } else { + end = startPos[numOfSubset - pInfo->offset]; + + if (pInfo->limit + pInfo->offset >= numOfSubset || pInfo->limit == -1) { + start = startPos[0]; + } else { + start = startPos[numOfSubset - pInfo->limit - pInfo->offset]; + } + } + + numOfTotal = end - start; + assert(numOfTotal > 0); + + memmove(pRes->pRes, pRes->pRes + start, numOfTotal * POINTER_BYTES); + } + + /* + * sort the result according to vgid to ensure meters with the same vgid is + * continuous in the result list + */ + __compar_fn_t functor = (pRes->nodeType == TAST_NODE_TYPE_METER_PTR) ? 
tabObjVGIDComparator : nodeVGIDComparator; + qsort(pRes->pRes, numOfTotal, POINTER_BYTES, functor); + + pRes->num = numOfTotal; + free(descriptor->pTagSchema); + free(descriptor); + free(startPos); +} + +static char *getTagValueFromMeter(STabObj *pMeter, int32_t offset, void *param) { + if (offset == -1) { + extractMeterName(pMeter->meterId, param); + return param; + } else { + char *tags = pMeter->pTagData + TSDB_METER_ID_LEN; // tag start position + return (tags + offset); + } +} + +// todo refactor +bool tSQLElemFilterCallback(tSkipListNode *pNode, void *param) { + tQueryInfo *pCols = (tQueryInfo *)param; + STabObj * pMeter = (STabObj *)pNode->pData; + + char name[TSDB_METER_NAME_LEN + 1] = {0}; + char * val = getTagValueFromMeter(pMeter, pCols->offset, name); + int8_t type = (pCols->pSchema[pCols->colIdx].type); + + int32_t ret = 0; + if (pCols->q.nType == TSDB_DATA_TYPE_BINARY || pCols->q.nType == TSDB_DATA_TYPE_NCHAR) { + ret = pCols->comparator(val, pCols->q.pz); + } else { + tVariant v = {0}; + switch (type) { + case TSDB_DATA_TYPE_INT: + v.i64Key = *(int32_t *)val; + break; + case TSDB_DATA_TYPE_BIGINT: + v.i64Key = *(int64_t *)val; + break; + case TSDB_DATA_TYPE_TINYINT: + v.i64Key = *(int8_t *)val; + break; + case TSDB_DATA_TYPE_SMALLINT: + v.i64Key = *(int16_t *)val; + break; + case TSDB_DATA_TYPE_DOUBLE: + v.dKey = *(double *)val; + break; + case TSDB_DATA_TYPE_FLOAT: + v.dKey = *(float *)val; + break; + case TSDB_DATA_TYPE_BOOL: + v.i64Key = *(int8_t *)val; + break; + } + ret = pCols->comparator(&v.i64Key, &pCols->q.i64Key); + } + + switch (pCols->optr) { + case TSDB_RELATION_EQUAL: { + return ret == 0; + } + case TSDB_RELATION_NOT_EQUAL: { + return ret != 0; + } + case TSDB_RELATION_LARGE_EQUAL: { + return ret >= 0; + } + case TSDB_RELATION_LARGE: { + return ret > 0; + } + case TSDB_RELATION_LESS_EQUAL: { + return ret <= 0; + } + case TSDB_RELATION_LESS: { + return ret < 0; + } + case TSDB_RELATION_LIKE: { + return ret == 0; + } + + default: + assert(false); + } + return true; +} + +int mgmtRetrieveMetersFromMetric(STabObj *pMetric, SMetricMetaMsg *pInfo, tQueryResultset *pRes) { + /* no table created in accordance with this metric. 
*/ + if (pMetric->pSkipList == NULL || pMetric->pSkipList->nSize == 0) { + assert(pMetric->numOfMeters == 0); + return TSDB_CODE_SUCCESS; + } + + char * pQueryCond = pInfo->tags; + int32_t queryCondLength = pInfo->condLength; + + tSQLBinaryExpr *pExpr = NULL; + SSchema * pTagSchema = (SSchema *)(pMetric->schema + pMetric->numOfColumns * sizeof(SSchema)); + + char *queryStr = calloc(1, (queryCondLength + 1) * TSDB_NCHAR_SIZE); + if (queryCondLength > 0) { + /* transfer the unicode string to mbs binary expression */ + taosUcs4ToMbs(pQueryCond, queryCondLength * TSDB_NCHAR_SIZE, queryStr); + queryCondLength = strlen(queryStr) + 1; + + mTrace("metric:%s len:%d, type:%d, metric query condition:%s", pMetric->meterId, queryCondLength, pInfo->type, queryStr); + } else { + mTrace("metric:%s, retrieve all meter, no query condition", pMetric->meterId); + } + + if (queryCondLength > 0) { + if (pInfo->type == TSQL_STABLE_QTYPE_SET) { + char *oldStr = strdup(queryStr); + mgmtRetrieveMetersFromIDs(pRes, queryStr, oldStr, pMetric); + tfree(oldStr); + } else { + tSQLBinaryExprFromString(&pExpr, pTagSchema, pMetric->numOfTags, queryStr, queryCondLength); + + /* failed to build expression, no result, return immediately */ + if (pExpr == NULL) { + mError("metric:%s, no result returned, error in metric query expression:%s", pMetric->meterId, queryStr); + tfree(queryStr); + return TSDB_CODE_OPS_NOT_SUPPORT; + } else { + // query according to the binary expression + tSQLBinaryExprTraverse(pExpr, pMetric->pSkipList, pTagSchema, pMetric->numOfTags, tSQLElemFilterCallback, pRes); + tSQLBinaryExprDestroy(&pExpr); + } + } + } else { + pRes->num = tSkipListIterateList(pMetric->pSkipList, (tSkipListNode ***)&pRes->pRes, NULL, NULL); + } + + tfree(queryStr); + mTrace("metric:%s numOfRes:%d", pMetric->meterId, pRes->num); + + mgmtReorganizeMetersInMetricMeta(pMetric, pInfo, pTagSchema, pRes); + return TSDB_CODE_SUCCESS; +} + +static int32_t mgmtGetMetricMetaMsgSize(tQueryResultset *pResult, int32_t tagLength, int32_t maxMetersPerQuery) { + int32_t numOfVnodes = mgmtGetNumOfVnodesInResult(pResult); + + int32_t size = (sizeof(SMeterSidExtInfo) + tagLength) * pResult->num + + ((pResult->num / maxMetersPerQuery) + 1 + numOfVnodes) * sizeof(SVnodeSidList) + sizeof(SMetricMeta) + + 1024; + + return size; +} + +int mgmtRetrieveMetricMeta(void *thandle, char **pStart, STabObj *pMetric, SMetricMetaMsg *pMetricMetaMsg) { + SVnodeSidList *pList = NULL; + + int32_t tagLen = mgmtGetReqTagsLength(pMetric, (int16_t *)pMetricMetaMsg->tagCols, pMetricMetaMsg->numOfTags); + + /* + * naive method: Do not limit the maximum number of meters in each + * vnode(subquery), + * split the result according to vnodes + * todo: split the number of vnodes to make sure each vnode has the same + * number of + * tables to query, while not break the upper limit of number of vnode queries + */ + int32_t maxMetersPerVNodeInQuery = INT32_MAX; + + int ovgId = -1; + + tQueryResultset result = {0}; + int ret = mgmtRetrieveMetersFromMetric(pMetric, pMetricMetaMsg, &result); + + int rspMsgSize = 512; + if (ret == TSDB_CODE_SUCCESS) { + rspMsgSize = mgmtGetMetricMetaMsgSize(&result, tagLen, maxMetersPerVNodeInQuery); + } + + *pStart = taosBuildRspMsgWithSize(thandle, TSDB_MSG_TYPE_METRIC_META_RSP, rspMsgSize); + if (*pStart == NULL) return 0; + + char * pMsg = (*pStart); + STaosRsp *pRsp = (STaosRsp *)pMsg; + + pRsp->code = ret; + pMsg += sizeof(STaosRsp); + *pMsg = TSDB_IE_TYPE_META; + pMsg++; + + if (ret != TSDB_CODE_SUCCESS) { + return pMsg - (*pStart); // one 
bit in payload + } + + SMetricMeta *pMeta = (SMetricMeta *)pMsg; + + pMeta->numOfMeters = 0; + pMeta->numOfVnodes = 0; + pMeta->tagLen = htons((uint16_t)tagLen); + + pMsg = (char *)pMeta + sizeof(SMetricMeta); + + // char* start = pMsg; + for (int32_t i = 0; i < result.num; ++i) { + STabObj *pMeter = mgmtGetResultPayload(&result, i); + +#ifdef _DEBUG_VIEW + mTrace("vgid: %d, sid: %d", pMeter->gid.vgId, pMeter->gid.sid); +#endif + pMsg = mgmtBuildMetricMetaMsg(pMeter, &ovgId, &pList, pMeta, tagLen, pMetricMetaMsg->numOfTags, + pMetricMetaMsg->tagCols, maxMetersPerVNodeInQuery, pMsg); + } + + // char* output = malloc(pMsg - (*pStart)); + // int32_t len = tsCompressString(start, pMsg - start, 0, output, 2, NULL); + + int32_t msgLen = pMsg - (*pStart); + mTrace("metric:%s metric-meta tables:%d, vnode:%d, msg size %d", pMetric->meterId, pMeta->numOfMeters, + pMeta->numOfVnodes, msgLen); + + pMeta->numOfMeters = htonl(pMeta->numOfMeters); + pMeta->numOfVnodes = htonl(pMeta->numOfVnodes); + + tfree(result.pRes); + return msgLen; +} + +int mgmtRetrieveMeters(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + STabObj *pMeter = NULL; + char * pWrite; + int cols = 0; + int prefixLen; + int numOfRead = 0; + char prefix[20] = {0}; + + if (pConn->pDb == NULL) return 0; + strcpy(prefix, pConn->pDb->name); + strcat(prefix, TS_PATH_DELIMITER); + prefixLen = strlen(prefix); + + SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; + char meterName[TSDB_METER_NAME_LEN] = {0}; + + while (numOfRows < rows) { + pShow->pNode = sdbFetchRow(meterSdb, pShow->pNode, (void **)&pMeter); + if (pMeter == NULL) break; + + if (mgmtIsMetric(pMeter)) continue; + + // not belong to current db + if (strncmp(pMeter->meterId, prefix, prefixLen)) continue; + + numOfRead++; + memset(meterName, 0, tListLen(meterName)); + + // pattern compare for meter name + extractMeterName(pMeter->meterId, meterName); + + if (pShow->payloadLen > 0 && + patternMatch(pShow->payload, meterName, TSDB_METER_NAME_LEN, &info) != TSDB_PATTERN_MATCH) + continue; + + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strncpy(pWrite, meterName, TSDB_METER_NAME_LEN); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pMeter->createdTime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pMeter->numOfColumns; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + if (pMeter->pTagData) { + extractMeterName(pMeter->pTagData, pWrite); + } + cols++; + + numOfRows++; + } + + pShow->numOfReads += numOfRead; + const int32_t NUM_OF_COLUMNS = 4; + + mgmtVacuumResult(data, NUM_OF_COLUMNS, numOfRows, rows, pShow); + + return numOfRows; +} + +int mgmtGetMetricMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + if (pConn->pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; + + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = TSDB_METER_NAME_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "name"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "created time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "columns"); + 
pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "tags"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "tables"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + pShow->numOfRows = pConn->pDb->numOfMetrics; + pShow->pNode = pConn->pDb->pMetric; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + return 0; +} + +int mgmtRetrieveMetrics(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + STabObj *pMetric = NULL; + char * pWrite; + int cols = 0; + + SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; + + char metricName[TSDB_METER_NAME_LEN] = {0}; + + while (numOfRows < rows) { + pMetric = (STabObj *)pShow->pNode; + if (pMetric == NULL) break; + pShow->pNode = (void *)pMetric->next; + + memset(metricName, 0, tListLen(metricName)); + extractMeterName(pMetric->meterId, metricName); + + if (pShow->payloadLen > 0 && + patternMatch(pShow->payload, metricName, TSDB_METER_NAME_LEN, &info) != TSDB_PATTERN_MATCH) + continue; + + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + extractMeterName(pMetric->meterId, pWrite); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pMetric->createdTime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pMetric->numOfColumns; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pMetric->numOfTags; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pMetric->numOfMeters; + cols++; + + numOfRows++; + } + + pShow->numOfReads += numOfRows; + return numOfRows; +} + +int32_t mgmtFindTagCol(STabObj *pMetric, const char *tagName) { + if (!mgmtIsMetric(pMetric)) return -1; + + SSchema *schema = NULL; + + for (int i = 0; i < pMetric->numOfTags; i++) { + schema = (SSchema *)(pMetric->schema + (pMetric->numOfColumns + i) * sizeof(SSchema)); + + if (strcasecmp(tagName, schema->name) == 0) return i; + } + + return -1; +} + +int32_t mgmtMeterModifyTagNameByCol(STabObj *pMetric, uint32_t col, const char *nname) { + int rowSize = 0; + assert(col >= 0); + + uint32_t len = strlen(nname); + + if (pMetric == NULL || (!mgmtIsMetric(pMetric)) || col >= pMetric->numOfTags || len >= TSDB_COL_NAME_LEN || + mgmtFindTagCol(pMetric, nname) >= 0) + return TSDB_CODE_APP_ERROR; + + // update + SSchema *schema = (SSchema *)(pMetric->schema + (pMetric->numOfColumns + col) * sizeof(SSchema)); + strncpy(schema->name, nname, TSDB_COL_NAME_LEN); + + // Encode string + int size = 1 + sizeof(STabObj) + TSDB_MAX_BYTES_PER_ROW; + char *msg = (char *)malloc(size); + if (msg == NULL) return TSDB_CODE_APP_ERROR; + memset(msg, 0, size); + + mgmtMeterActionEncode(pMetric, msg, size, &rowSize); + + int32_t ret = sdbUpdateRow(meterSdb, msg, rowSize, 1); + tfree(msg); + + if (ret < 0) { + mError("Failed to modify table %s tag column", pMetric->meterId); + return TSDB_CODE_APP_ERROR; + } + + mError("Succeed to modify table %s tag 
column", pMetric->meterId); + return TSDB_CODE_SUCCESS; +} + +int32_t mgmtMeterModifyTagNameByName(STabObj *pMetric, const char *oname, const char *nname) { + if (pMetric == NULL || (!mgmtIsMetric(pMetric))) return TSDB_CODE_APP_ERROR; + + int index = mgmtFindTagCol(pMetric, oname); + if (index < 0) { + // Tag name does not exist + mError("Failed to modify table %s tag column, oname: %s, nname: %s", pMetric->meterId, oname, nname); + return TSDB_CODE_INVALID_MSG_TYPE; + } + + return mgmtMeterModifyTagNameByCol(pMetric, index, nname); +} + +int32_t mgmtMeterModifyTagValueByCol(STabObj *pMeter, int col, const char *nContent) { + int rowSize = 0; + if (pMeter == NULL || nContent == NULL || (!mgmtMeterCreateFromMetric(pMeter))) return TSDB_CODE_APP_ERROR; + + STabObj *pMetric = mgmtGetMeter(pMeter->pTagData); + assert(pMetric != NULL); + + if (col < 0 || col > pMetric->numOfTags) return TSDB_CODE_APP_ERROR; + + SSchema *schema = (SSchema *)(pMetric->schema + (pMetric->numOfColumns + col) * sizeof(SSchema)); + + if (col == 0) { + pMeter->isDirty = 1; + removeMeterFromMetricIndex(pMetric, pMeter); + } + memcpy(pMeter->pTagData + mgmtGetTagsLength(pMetric, col) + TSDB_METER_ID_LEN, nContent, schema->bytes); + if (col == 0) { + addMeterIntoMetricIndex(pMetric, pMeter); + } + + // Encode the string + int size = sizeof(STabObj) + TSDB_MAX_BYTES_PER_ROW + 1; + char *msg = (char *)malloc(size); + if (msg == NULL) { + mError("failed to allocate message memory while modify tag value"); + return TSDB_CODE_APP_ERROR; + } + memset(msg, 0, size); + + mgmtMeterActionEncode(pMeter, msg, size, &rowSize); + + int32_t ret = sdbUpdateRow(meterSdb, msg, rowSize, 1); // Need callback function + tfree(msg); + + if (pMeter->isDirty) pMeter->isDirty = 0; + + if (ret < 0) { + mError("Failed to modify tag column %d of table %s", col, pMeter->meterId); + return TSDB_CODE_APP_ERROR; + } + + mTrace("Succeed to modify tag column %d of table %s", col, pMeter->meterId); + return TSDB_CODE_SUCCESS; +} + +int32_t mgmtMeterModifyTagValueByName(STabObj *pMeter, char *tagName, char *nContent) { + if (pMeter == NULL || tagName == NULL || nContent == NULL || (!mgmtMeterCreateFromMetric(pMeter))) + return TSDB_CODE_INVALID_MSG_TYPE; + + STabObj *pMetric = mgmtGetMeter(pMeter->pTagData); + if (pMetric == NULL) return TSDB_CODE_APP_ERROR; + + int col = mgmtFindTagCol(pMetric, tagName); + if (col < 0) return TSDB_CODE_APP_ERROR; + + return mgmtMeterModifyTagValueByCol(pMeter, col, nContent); +} + +int32_t mgmtMeterAddTags(STabObj *pMetric, SSchema schema[], int ntags) { + if (pMetric == NULL || (!mgmtIsMetric(pMetric))) return TSDB_CODE_INVALID_TABLE; + + if (pMetric->numOfTags + ntags > TSDB_MAX_TAGS) return TSDB_CODE_APP_ERROR; + + // check if schemas have the same name + for (int i = 1; i < ntags; i++) { + for (int j = 0; j < i; j++) { + if (strcasecmp(schema[i].name, schema[j].name) == 0) { + return TSDB_CODE_APP_ERROR; + } + } + } + + for (int i = 0; i < ntags; i++) { + if (mgmtFindTagCol(pMetric, schema[i].name) >= 0) { + return TSDB_CODE_APP_ERROR; + } + } + + uint32_t size = sizeof(SMeterBatchUpdateMsg) + sizeof(SSchema) * ntags; + SMeterBatchUpdateMsg *msg = (SMeterBatchUpdateMsg *)malloc(size); + memset(msg, 0, size); + + memcpy(msg->meterId, pMetric->meterId, TSDB_METER_ID_LEN); + msg->type = SDB_TYPE_INSERT; + msg->cols = ntags; + memcpy(msg->data, schema, sizeof(SSchema) * ntags); + + int32_t ret = sdbBatchUpdateRow(meterSdb, msg, size); + tfree(msg); + + if (ret < 0) { + mError("Failed to add tag column %s to table %s", 
schema[0].name, pMetric->meterId); + return TSDB_CODE_APP_ERROR; + } + + mTrace("Succeed to add tag column %s to table %s", schema[0].name, pMetric->meterId); + return TSDB_CODE_SUCCESS; +} + +int32_t mgmtMeterDropTagByCol(STabObj *pMetric, int col) { + if (pMetric == NULL || (!mgmtIsMetric(pMetric)) || col <= 0 || col >= pMetric->numOfTags) return TSDB_CODE_APP_ERROR; + + // Pack message to do batch update + uint32_t size = sizeof(SMeterBatchUpdateMsg) + sizeof(SchemaUnit); + SMeterBatchUpdateMsg *msg = (SMeterBatchUpdateMsg *)malloc(size); + memset(msg, 0, size); + + memcpy(msg->meterId, pMetric->meterId, TSDB_METER_ID_LEN); + msg->type = SDB_TYPE_DELETE; // TODO: what should here be ? + msg->cols = 1; + + ((SchemaUnit *)(msg->data))->col = col; + ((SchemaUnit *)(msg->data))->pos = mgmtGetTagsLength(pMetric, col) + TSDB_METER_ID_LEN; + ((SchemaUnit *)(msg->data))->schema = *(SSchema *)(pMetric->schema + sizeof(SSchema) * (pMetric->numOfColumns + col)); + + int32_t ret = sdbBatchUpdateRow(meterSdb, msg, size); + tfree(msg); + + if (ret < 0) { + mError("Failed to drop tag column: %d from table: %s", col, pMetric->meterId); + return TSDB_CODE_APP_ERROR; + } + + mTrace("Succeed to drop tag column: %d from table: %s", col, pMetric->meterId); + return TSDB_CODE_SUCCESS; +} + +int32_t mgmtMeterDropTagByName(STabObj *pMetric, char *name) { + if (pMetric == NULL || (!mgmtIsMetric(pMetric))) { + mTrace("Failed to drop tag name: %s from table: %s", name, pMetric->meterId); + return TSDB_CODE_INVALID_TABLE; + } + + int col = mgmtFindTagCol(pMetric, name); + + return mgmtMeterDropTagByCol(pMetric, col); +} + +int32_t mgmtFindColumnIndex(STabObj *pMeter, const char *colName) { + STabObj *pMetric = NULL; + SSchema *schema = NULL; + + if (pMeter->meterType == TSDB_METER_OTABLE || pMeter->meterType == TSDB_METER_METRIC) { + schema = (SSchema *)pMeter->schema; + for (int32_t i = 0; i < pMeter->numOfColumns; i++) { + if (strcasecmp(schema[i].name, colName) == 0) { + return i; + } + } + + } else if (pMeter->meterType == TSDB_METER_MTABLE) { + pMetric = mgmtGetMeter(pMeter->pTagData); + if (pMetric == NULL) { + mError("MTable not belongs to any metric, meter: %s", pMeter->meterId); + return -1; + } + schema = (SSchema *)pMetric->schema; + for (int32_t i = 0; i < pMetric->numOfColumns; i++) { + if (strcasecmp(schema[i].name, colName) == 0) { + return i; + } + } + } + + return -1; +} + +int32_t mgmtMeterAddColumn(STabObj *pMeter, SSchema schema[], int ncols) { + SAcctObj *pAcct = NULL; + SDbObj * pDb = NULL; + + if (pMeter == NULL || pMeter->meterType == TSDB_METER_MTABLE || pMeter->meterType == TSDB_METER_STABLE || ncols <= 0) + return TSDB_CODE_APP_ERROR; + + // ASSUMPTION: no two tags are the same + for (int i = 0; i < ncols; i++) + if (mgmtFindColumnIndex(pMeter, schema[i].name) > 0) return TSDB_CODE_APP_ERROR; + + pDb = mgmtGetDbByMeterId(pMeter->meterId); + if (pDb == NULL) { + mError("meter: %s not belongs to any database", pMeter->meterId); + return TSDB_CODE_APP_ERROR; + } + + pAcct = &acctObj; + pMeter->schema = realloc(pMeter->schema, pMeter->schemaSize + sizeof(SSchema) * ncols); + + if (pMeter->meterType == TSDB_METER_OTABLE) { + memcpy(pMeter->schema + pMeter->schemaSize, schema, sizeof(SSchema) * ncols); + } else if (pMeter->meterType == TSDB_METER_METRIC) { + memmove(pMeter->schema + sizeof(SSchema) * (pMeter->numOfColumns + ncols), + pMeter->schema + sizeof(SSchema) * pMeter->numOfColumns, sizeof(SSchema) * pMeter->numOfTags); + memcpy(pMeter->schema + sizeof(SSchema) * pMeter->numOfColumns, 
schema, sizeof(SSchema) * ncols); + } + + SSchema *tschema = (SSchema *)(pMeter->schema + sizeof(SSchema) * pMeter->numOfColumns); + for (int i = 0; i < ncols; i++) tschema[i].colId = pMeter->nextColId++; + + pMeter->schemaSize += sizeof(SSchema) * ncols; + pMeter->numOfColumns += ncols; + pMeter->sversion++; + if (mgmtIsNormalMeter(pMeter)) + pAcct->acctInfo.numOfTimeSeries += ncols; + else + pAcct->acctInfo.numOfTimeSeries += (ncols * pMeter->numOfMeters); + sdbUpdateRow(meterSdb, pMeter, 0, 1); + + if (pMeter->meterType == TSDB_METER_METRIC) { + for (STabObj *pObj = pMeter->pHead; pObj != NULL; pObj = pObj->next) { + pObj->numOfColumns++; + pObj->nextColId = pMeter->nextColId; + pObj->sversion = pMeter->sversion; + sdbUpdateRow(meterSdb, pObj, 0, 1); + } + } + + return TSDB_CODE_SUCCESS; +} + +int32_t mgmtMeterDropColumnByName(STabObj *pMeter, const char *name) { + SAcctObj *pAcct = NULL; + SDbObj * pDb = NULL; + + if (pMeter == NULL || pMeter->meterType == TSDB_METER_MTABLE || pMeter->meterType == TSDB_METER_STABLE) + return TSDB_CODE_APP_ERROR; + + int32_t index = mgmtFindColumnIndex(pMeter, name); + if (index < 0) return TSDB_CODE_APP_ERROR; + + pDb = mgmtGetDbByMeterId(pMeter->meterId); + if (pDb == NULL) { + mError("meter: %s not belongs to any database", pMeter->meterId); + return TSDB_CODE_APP_ERROR; + } + + pAcct = &acctObj; + + if (pMeter->meterType == TSDB_METER_OTABLE) { + memmove(pMeter->schema + sizeof(SSchema) * index, pMeter->schema + sizeof(SSchema) * (index + 1), + sizeof(SSchema) * (pMeter->numOfColumns - index - 1)); + } else if (pMeter->meterType == TSDB_METER_METRIC) { + memmove(pMeter->schema + sizeof(SSchema) * index, pMeter->schema + sizeof(SSchema) * (index + 1), + sizeof(SSchema) * (pMeter->numOfColumns + pMeter->numOfTags - index - 1)); + } + pMeter->schemaSize -= sizeof(SSchema); + pMeter->numOfColumns--; + if (mgmtIsNormalMeter(pMeter)) + pAcct->acctInfo.numOfTimeSeries--; + else + pAcct->acctInfo.numOfTimeSeries -= (pMeter->numOfMeters); + + pMeter->schema = realloc(pMeter->schema, pMeter->schemaSize); + pMeter->sversion++; + sdbUpdateRow(meterSdb, pMeter, 0, 1); + + if (pMeter->meterType == TSDB_METER_METRIC) { + for (STabObj *pObj = pMeter->pHead; pObj != NULL; pObj = pObj->next) { + pObj->numOfColumns--; + pObj->sversion = pMeter->sversion; + sdbUpdateRow(meterSdb, pObj, 0, 1); + } + } + + return TSDB_CODE_SUCCESS; +} diff --git a/src/system/src/mgmtProfile.c b/src/system/src/mgmtProfile.c new file mode 100644 index 000000000000..6b612b75a0bc --- /dev/null +++ b/src/system/src/mgmtProfile.c @@ -0,0 +1,533 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "mgmt.h" +#include +#include "mgmtProfile.h" +#include "taosmsg.h" +#include "tschemautil.h" + +typedef struct { + uint32_t ip; + short port; + char user[TSDB_METER_ID_LEN]; +} SCDesc; + +typedef struct { + int index; + int numOfQueries; + SCDesc * connInfo; + SCDesc **cdesc; + SQDesc qdesc[]; +} SQueryShow; + +typedef struct { + int index; + int numOfStreams; + SCDesc * connInfo; + SCDesc **cdesc; + SSDesc sdesc[]; +} SStreamShow; + +int mgmtSaveQueryStreamList(char *cont, int contLen, SConnObj *pConn) { + SAcctObj *pAcct = pConn->pAcct; + + if (contLen <= 0) { + return 0; + } + + pthread_mutex_lock(&pAcct->mutex); + + if (pConn->pQList) { + pAcct->acctInfo.numOfQueries -= pConn->pQList->numOfQueries; + pAcct->acctInfo.numOfStreams -= pConn->pSList->numOfStreams; + } + + pConn->pQList = realloc(pConn->pQList, contLen); + memcpy(pConn->pQList, cont, contLen); + + pConn->pSList = (SSList *)(((char *)pConn->pQList) + pConn->pQList->numOfQueries * sizeof(SQDesc) + sizeof(SQList)); + + pAcct->acctInfo.numOfQueries += pConn->pQList->numOfQueries; + pAcct->acctInfo.numOfStreams += pConn->pSList->numOfStreams; + + pthread_mutex_unlock(&pAcct->mutex); + + return 0; +} + +int mgmtGetQueries(SShowObj *pShow, SConnObj *pConn) { + SAcctObj * pAcct = pConn->pAcct; + SQueryShow *pQueryShow; + + pthread_mutex_lock(&pAcct->mutex); + + pQueryShow = malloc(sizeof(SQDesc) * pAcct->acctInfo.numOfQueries + sizeof(SQueryShow)); + pQueryShow->numOfQueries = 0; + pQueryShow->index = 0; + pQueryShow->connInfo = NULL; + pQueryShow->cdesc = NULL; + + if (pAcct->acctInfo.numOfQueries > 0) { + pQueryShow->connInfo = (SCDesc *)malloc(pAcct->acctInfo.numOfConns * sizeof(SCDesc)); + pQueryShow->cdesc = (SCDesc **)malloc(pAcct->acctInfo.numOfQueries * sizeof(SCDesc *)); + + pConn = pAcct->pConn; + SQDesc * pQdesc = pQueryShow->qdesc; + SCDesc * pCDesc = pQueryShow->connInfo; + SCDesc **ppCDesc = pQueryShow->cdesc; + + while (pConn) { + if (pConn->pQList && pConn->pQList->numOfQueries > 0) { + pCDesc->ip = pConn->ip; + pCDesc->port = pConn->port; + strcpy(pCDesc->user, pConn->pUser->user); + + memcpy(pQdesc, pConn->pQList->qdesc, sizeof(SQDesc) * pConn->pQList->numOfQueries); + pQdesc += pConn->pQList->numOfQueries; + pQueryShow->numOfQueries += pConn->pQList->numOfQueries; + for (int i = 0; i < pConn->pQList->numOfQueries; ++i, ++ppCDesc) *ppCDesc = pCDesc; + + pCDesc++; + } + pConn = pConn->next; + } + } + + pthread_mutex_unlock(&pAcct->mutex); + + // sorting based on useconds + + pShow->pNode = pQueryShow; + + return 0; +} + +int mgmtGetQueryMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = TSDB_USER_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "user"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_IPv4ADDR_LEN + 14; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "ip:port:id"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "created time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_BIGINT; + strcpy(pSchema[cols].name, "time(us)"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_SHOW_SQL_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + 
strcpy(pSchema[cols].name, "sql"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + pShow->numOfRows = 1000000; + pShow->pNode = NULL; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + mgmtGetQueries(pShow, pConn); + return 0; +} + +int mgmtKillQuery(char *qidstr, SConnObj *pConn) { + char *temp, *chr, idstr[64]; + strcpy(idstr, qidstr); + + temp = idstr; + chr = strchr(temp, ':'); + if (chr == NULL) goto _error; + *chr = 0; + uint32_t ip = inet_addr(temp); + + temp = chr + 1; + chr = strchr(temp, ':'); + if (chr == NULL) goto _error; + *chr = 0; + short port = htons(atoi(temp)); + + temp = chr + 1; + uint32_t queryId = atoi(temp); + + SAcctObj *pAcct = pConn->pAcct; + + pthread_mutex_lock(&pAcct->mutex); + + pConn = pAcct->pConn; + while (pConn) { + if (pConn->ip == ip && pConn->port == port && pConn->pQList) { + int i; + SQDesc *pQDesc = pConn->pQList->qdesc; + for (i = 0; i < pConn->pQList->numOfQueries; ++i, ++pQDesc) { + if (pQDesc->queryId == queryId) break; + } + + if (i < pConn->pQList->numOfQueries) break; + } + + pConn = pConn->next; + } + + if (pConn) pConn->queryId = queryId; + + pthread_mutex_unlock(&pAcct->mutex); + + if (pConn == NULL || pConn->pQList == NULL || pConn->pQList->numOfQueries == 0) goto _error; + + mTrace("query:%s is there, kill it", qidstr); + return 0; + +_error: + mTrace("query:%s is not there", qidstr); + + return TSDB_CODE_INVALID_QUERY_ID; +} + +int mgmtRetrieveQueries(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + char *pWrite; + int cols = 0; + + SQueryShow *pQueryShow = (SQueryShow *)pShow->pNode; + + if (rows > pQueryShow->numOfQueries - pQueryShow->index) rows = pQueryShow->numOfQueries - pQueryShow->index; + + while (numOfRows < rows) { + SQDesc *pNode = pQueryShow->qdesc + pQueryShow->index; + SCDesc *pCDesc = pQueryShow->cdesc[pQueryShow->index]; + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, pCDesc->user); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + uint32_t ip = pCDesc->ip; + sprintf(pWrite, "%d.%d.%d.%d:%hu:%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24, htons(pCDesc->port), + pNode->queryId); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pNode->stime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pNode->useconds; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, pNode->sql); + cols++; + + numOfRows++; + pQueryShow->index++; + } + + if (numOfRows == 0) { + tfree(pQueryShow->cdesc); + tfree(pQueryShow->connInfo); + tfree(pQueryShow); + } + + pShow->numOfReads += numOfRows; + return numOfRows; +} + +int mgmtGetStreams(SShowObj *pShow, SConnObj *pConn) { + SAcctObj * pAcct = pConn->pAcct; + SStreamShow *pStreamShow; + + pthread_mutex_lock(&pAcct->mutex); + + pStreamShow = malloc(sizeof(SSDesc) * pAcct->acctInfo.numOfStreams + sizeof(SQueryShow)); + pStreamShow->numOfStreams = 0; + pStreamShow->index = 0; + pStreamShow->connInfo = NULL; + pStreamShow->cdesc = NULL; + + if (pAcct->acctInfo.numOfStreams > 0) { + pStreamShow->connInfo = (SCDesc 
*)malloc(pAcct->acctInfo.numOfConns * sizeof(SCDesc)); + pStreamShow->cdesc = (SCDesc **)malloc(pAcct->acctInfo.numOfStreams * sizeof(SCDesc *)); + + pConn = pAcct->pConn; + SSDesc * pSdesc = pStreamShow->sdesc; + SCDesc * pCDesc = pStreamShow->connInfo; + SCDesc **ppCDesc = pStreamShow->cdesc; + + while (pConn) { + if (pConn->pSList && pConn->pSList->numOfStreams > 0) { + pCDesc->ip = pConn->ip; + pCDesc->port = pConn->port; + strcpy(pCDesc->user, pConn->pUser->user); + + memcpy(pSdesc, pConn->pSList->sdesc, sizeof(SSDesc) * pConn->pSList->numOfStreams); + pSdesc += pConn->pSList->numOfStreams; + pStreamShow->numOfStreams += pConn->pSList->numOfStreams; + for (int i = 0; i < pConn->pSList->numOfStreams; ++i, ++ppCDesc) *ppCDesc = pCDesc; + + pCDesc++; + } + pConn = pConn->next; + } + } + + pthread_mutex_unlock(&pAcct->mutex); + + // sorting based on useconds + + pShow->pNode = pStreamShow; + + return 0; +} + +int mgmtGetStreamMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = TSDB_USER_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "user"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_IPv4ADDR_LEN + 14; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "ip:port:id"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "created time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "exec time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "time(us)"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_SHOW_SQL_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "sql"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "cycles"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + pShow->numOfRows = 1000000; + pShow->pNode = NULL; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + mgmtGetStreams(pShow, pConn); + return 0; +} + +int mgmtRetrieveStreams(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + char *pWrite; + int cols = 0; + + SStreamShow *pStreamShow = (SStreamShow *)pShow->pNode; + + if (rows > pStreamShow->numOfStreams - pStreamShow->index) rows = pStreamShow->numOfStreams - pStreamShow->index; + + while (numOfRows < rows) { + SSDesc *pNode = pStreamShow->sdesc + pStreamShow->index; + SCDesc *pCDesc = pStreamShow->cdesc[pStreamShow->index]; + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, pCDesc->user); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + uint32_t ip = pCDesc->ip; + sprintf(pWrite, "%d.%d.%d.%d:%hu:%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24, htons(pCDesc->port), + pNode->streamId); 
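+    // Descriptive note (added): the sprintf above renders the "ip:port:id"
+    // column declared in mgmtGetStreamMeta(): the four octets of the
+    // connection ip, the byte-swapped port, and the stream id. Each column of
+    // the result buffer is written as data + offset[col] * rows +
+    // bytes[col] * numOfRows, i.e. columns are stored contiguously.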
+ cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pNode->ctime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pNode->stime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pNode->useconds; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, pNode->sql); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pNode->num; + cols++; + + numOfRows++; + pStreamShow->index++; + } + + if (numOfRows == 0) { + tfree(pStreamShow->cdesc); + tfree(pStreamShow->connInfo); + tfree(pStreamShow); + } + + pShow->numOfReads += numOfRows; + return numOfRows; +} + +int mgmtKillStream(char *qidstr, SConnObj *pConn) { + char *temp, *chr, idstr[64]; + strcpy(idstr, qidstr); + + temp = idstr; + chr = strchr(temp, ':'); + if (chr == NULL) goto _error; + *chr = 0; + uint32_t ip = inet_addr(temp); + + temp = chr + 1; + chr = strchr(temp, ':'); + if (chr == NULL) goto _error; + *chr = 0; + short port = htons(atoi(temp)); + + temp = chr + 1; + uint32_t streamId = atoi(temp); + + SAcctObj *pAcct = pConn->pAcct; + + pthread_mutex_lock(&pAcct->mutex); + + pConn = pAcct->pConn; + while (pConn) { + if (pConn->ip == ip && pConn->port == port && pConn->pSList) { + int i; + SSDesc *pSDesc = pConn->pSList->sdesc; + for (i = 0; i < pConn->pSList->numOfStreams; ++i, ++pSDesc) { + if (pSDesc->streamId == streamId) break; + } + + if (i < pConn->pSList->numOfStreams) break; + } + + pConn = pConn->next; + } + + if (pConn) pConn->streamId = streamId; + + pthread_mutex_unlock(&pAcct->mutex); + + if (pConn == NULL || pConn->pSList == NULL || pConn->pSList->numOfStreams == 0) goto _error; + + mTrace("stream:%s is there, kill it", qidstr); + return 0; + +_error: + mTrace("stream:%s is not there", qidstr); + + return TSDB_CODE_INVALID_STREAM_ID; +} + +int mgmtKillConnection(char *qidstr, SConnObj *pConn) { + SConnObj *pConn1 = NULL; + char * temp, *chr, idstr[64]; + strcpy(idstr, qidstr); + + temp = idstr; + chr = strchr(temp, ':'); + if (chr == NULL) goto _error; + *chr = 0; + uint32_t ip = inet_addr(temp); + + temp = chr + 1; + short port = htons(atoi(temp)); + + SAcctObj *pAcct = pConn->pAcct; + + pthread_mutex_lock(&pAcct->mutex); + + pConn = pAcct->pConn; + while (pConn) { + if (pConn->ip == ip && pConn->port == port) { + // there maybe two connections from a shell + if (pConn1 == NULL) + pConn1 = pConn; + else + break; + } + + pConn = pConn->next; + } + + if (pConn1) pConn1->killConnection = 1; + if (pConn) pConn->killConnection = 1; + + pthread_mutex_unlock(&pAcct->mutex); + + if (pConn1 == NULL) goto _error; + + mTrace("connection:%s is there, kill it", qidstr); + return 0; + +_error: + mTrace("connection:%s is not there", qidstr); + + return TSDB_CODE_INVALID_CONNECTION; +} diff --git a/src/system/src/mgmtShell.c b/src/system/src/mgmtShell.c new file mode 100644 index 000000000000..1245fea909ae --- /dev/null +++ b/src/system/src/mgmtShell.c @@ -0,0 +1,978 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include "taosmsg.h" + +#include "dnodeSystem.h" +#include "mgmt.h" +#include "mgmtProfile.h" +#include "tlog.h" + +void * pShellConn = NULL; +SConnObj *connList; +void *mgmtProcessMsgFromShell(char *msg, void *ahandle, void *thandle); +int (*mgmtProcessShellMsg[TSDB_MSG_TYPE_MAX])(char *, int, SConnObj *); +void mgmtInitProcessShellMsg(); +int mgmtKillQuery(char *queryId, SConnObj *pConn); + +void mgmtProcessTranRequest(SSchedMsg *pSchedMsg) { + SIntMsg * pMsg = (SIntMsg *)(pSchedMsg->msg); + SConnObj *pConn = (SConnObj *)(pSchedMsg->thandle); + + char *cont = (char *)pMsg->content + sizeof(SMgmtHead); + int contLen = pMsg->msgLen - sizeof(SIntMsg) - sizeof(SMgmtHead); + (*mgmtProcessShellMsg[pMsg->msgType])(cont, contLen, pConn); + + if (pSchedMsg->msg) free(pSchedMsg->msg); +} + +int mgmtInitShell() { + SRpcInit rpcInit; + + mgmtInitProcessShellMsg(); + + int size = sizeof(SConnObj) * tsMaxShellConns; + connList = (SConnObj *)malloc(size); + memset(connList, 0, size); + + int numOfThreads = tsNumOfCores * tsNumOfThreadsPerCore / 4.0; + if (numOfThreads < 1) numOfThreads = 1; + + memset(&rpcInit, 0, sizeof(rpcInit)); + rpcInit.localIp = tsInternalIp; + rpcInit.localPort = tsMgmtShellPort; + rpcInit.label = "MND-shell"; + rpcInit.numOfThreads = numOfThreads; + rpcInit.fp = mgmtProcessMsgFromShell; + rpcInit.bits = 20; + rpcInit.numOfChanns = 1; + rpcInit.sessionsPerChann = tsMaxShellConns; + rpcInit.idMgmt = TAOS_ID_FREE; + rpcInit.connType = TAOS_CONN_UDPS; + rpcInit.idleTime = tsShellActivityTimer * 2000; + rpcInit.qhandle = mgmtQhandle; + rpcInit.afp = mgmtRetriveUserAuthInfo; + + pShellConn = taosOpenRpc(&rpcInit); + if (pShellConn == NULL) { + mError("failed to init tcp connection to shell"); + return -1; + } + + return 0; +} + +void mgmtCleanUpShell() { + if (pShellConn) taosCloseRpc(pShellConn); + pShellConn = NULL; + tfree(connList); +} + +static void mgmtSetSchemaFromMeters(SSchema *pSchema, STabObj *pMeterObj, uint32_t numOfCols) { + SSchema *pMeterSchema = (SSchema *)(pMeterObj->schema); + for (int i = 0; i < numOfCols; ++i) { + pSchema->type = pMeterSchema[i].type; + strcpy(pSchema->name, pMeterSchema[i].name); + pSchema->bytes = htons(pMeterSchema[i].bytes); + pSchema->colId = htons(pMeterSchema[i].colId); + pSchema++; + } +} + +static uint32_t mgmtSetMeterTagValue(char *pTags, STabObj *pMetric, STabObj *pMeterObj) { + SSchema *pTagSchema = (SSchema *)(pMetric->schema + pMetric->numOfColumns * sizeof(SSchema)); + + char *tagVal = pMeterObj->pTagData + TSDB_METER_ID_LEN; // tag start position + + uint32_t tagsLen = 0; + for (int32_t i = 0; i < pMetric->numOfTags; ++i) { + tagsLen += pTagSchema[i].bytes; + } + + memcpy(pTags, tagVal, tagsLen); + return tagsLen; +} + +static char *mgmtAllocMsg(SConnObj *pConn, int32_t size, char **pMsg, STaosRsp **pRsp) { + char *pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_METERINFO_RSP, size); + if (pStart == NULL) return 0; + *pMsg = pStart; + *pRsp = (STaosRsp *)(*pMsg); + + return pStart; +} + +int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SMeterInfoMsg *pInfo = (SMeterInfoMsg *)pMsg; + STabObj * pMeterObj = NULL; + SVgObj * pVgroup = NULL; + 
SMeterMeta * pMeta = NULL; + SSchema * pSchema = NULL; + STaosRsp * pRsp = NULL; + char * pStart = NULL; + + pInfo->createFlag = htons(pInfo->createFlag); + + int size = sizeof(STaosHeader) + sizeof(STaosRsp) + sizeof(SMeterMeta) + sizeof(SSchema) * TSDB_MAX_COLUMNS + + sizeof(SSchema) * TSDB_MAX_TAGS + TSDB_MAX_TAGS_LEN + TSDB_EXTRA_PAYLOAD_SIZE; + + if ((pConn->pDb != NULL && pConn->pDb->dropStatus != TSDB_DB_STATUS_READY) || pConn->pDb == NULL) { + if ((pStart = mgmtAllocMsg(pConn, size, &pMsg, &pRsp)) == NULL) { + return 0; + } + + pRsp->code = TSDB_CODE_INVALID_DB; + pMsg++; + + goto _exit_code; + } + + pMeterObj = mgmtGetMeter(pInfo->meterId); + if (pMeterObj == NULL && pInfo->createFlag == 1) { + // create the meter objects if not exists + SCreateTableMsg *pCreateMsg = calloc(1, sizeof(SCreateTableMsg) + sizeof(STagData)); + memcpy(pCreateMsg->schema, pInfo->tags, sizeof(STagData)); + strcpy(pCreateMsg->meterId, pInfo->meterId); + // todo handle if not master mnode + int32_t code = mgmtCreateMeter(pConn->pDb, pCreateMsg); + mTrace("meter:%s is automatically created by %s, code:%d", pCreateMsg->meterId, pConn->pUser->user, code); + tfree(pCreateMsg); + + if (code != TSDB_CODE_SUCCESS) { + if ((pStart = mgmtAllocMsg(pConn, size, &pMsg, &pRsp)) == NULL) { + return 0; + } + + pRsp->code = code; + pMsg++; + + goto _exit_code; + } + + pMeterObj = mgmtGetMeter(pInfo->meterId); + } + + if ((pStart = mgmtAllocMsg(pConn, size, &pMsg, &pRsp)) == NULL) { + return 0; + } + + if (pMeterObj == NULL) { + if (pConn->pDb) + pRsp->code = TSDB_CODE_INVALID_TABLE; + else + pRsp->code = TSDB_CODE_DB_NOT_SELECTED; + pMsg++; + } else { + mTrace("meter:%s, meta is retrieved from:%s", pInfo->meterId, pMeterObj->meterId); + pRsp->code = 0; + pMsg += sizeof(STaosRsp); + *pMsg = TSDB_IE_TYPE_META; + pMsg++; + + pMeta = (SMeterMeta *)pMsg; + pMeta->uid = htobe64(pMeterObj->uid); + pMeta->sid = htonl(pMeterObj->gid.sid); + pMeta->vgid = htonl(pMeterObj->gid.vgId); + pMeta->sversion = htonl(pMeterObj->sversion); + + pMeta->precision = htons(pConn->pDb->cfg.precision); + + pMeta->numOfTags = htons(pMeterObj->numOfTags); + pMeta->numOfColumns = htons(pMeterObj->numOfColumns); + pMeta->meterType = htons(pMeterObj->meterType); + + pMsg += sizeof(SMeterMeta); + pSchema = (SSchema *)pMsg; // schema locates at the end of SMeterMeta + // struct + + if (mgmtMeterCreateFromMetric(pMeterObj)) { + assert(pMeterObj->numOfTags == 0); + + STabObj *pMetric = mgmtGetMeter(pMeterObj->pTagData); + uint32_t numOfTotalCols = (uint32_t)pMetric->numOfTags + pMetric->numOfColumns; + + pMeta->numOfTags = htons(pMetric->numOfTags); // update the numOfTags + // info + mgmtSetSchemaFromMeters(pSchema, pMetric, numOfTotalCols); + pMsg += numOfTotalCols * sizeof(SSchema); + + // for meters created from metric, we need the metric tag schema to parse the tag data + int32_t tagsLen = mgmtSetMeterTagValue(pMsg, pMetric, pMeterObj); + + pMsg += tagsLen; + } else { + /* + * for metrics, or meters that are not created from metric, set the schema directly + * for meters created from metric, we use the schema of metric instead + */ + uint32_t numOfTotalCols = (uint32_t)pMeterObj->numOfTags + pMeterObj->numOfColumns; + mgmtSetSchemaFromMeters(pSchema, pMeterObj, numOfTotalCols); + pMsg += numOfTotalCols * sizeof(SSchema); + } + + if (mgmtIsNormalMeter(pMeterObj)) { + pVgroup = mgmtGetVgroup(pMeterObj->gid.vgId); + if (pVgroup == NULL) { + pRsp->code = TSDB_CODE_INVALID_TABLE; + goto _exit_code; + } + pMeta->vpeerDesc[0].vnode = 
htonl(pVgroup->vnodeGid[0].vnode); + } + } + +_exit_code: + msgLen = pMsg - pStart; + + taosSendMsgToPeer(pConn->thandle, pStart, msgLen); + + return msgLen; +} + +int mgmtProcessMetricMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SMetricMetaMsg *pMetricMetaMsg = (SMetricMetaMsg *)pMsg; + STabObj * pMetric; + STaosRsp * pRsp; + char * pStart; + + pMetric = mgmtGetMeter(pMetricMetaMsg->meterId); + + if (pMetric == NULL || (pConn->pDb != NULL && pConn->pDb->dropStatus != TSDB_DB_STATUS_READY)) { + pStart = taosBuildRspMsg(pConn->thandle, TSDB_MSG_TYPE_METRIC_META_RSP); + if (pStart == NULL) return 0; + + pMsg = pStart; + pRsp = (STaosRsp *)pMsg; + if (pConn->pDb) + pRsp->code = TSDB_CODE_INVALID_TABLE; + else + pRsp->code = TSDB_CODE_DB_NOT_SELECTED; + pMsg++; + + msgLen = pMsg - pStart; + } else { + pMetricMetaMsg->condLength = htonl(pMetricMetaMsg->condLength); + pMetricMetaMsg->orderIndex = htons(pMetricMetaMsg->orderIndex); + pMetricMetaMsg->orderType = htons(pMetricMetaMsg->orderType); + pMetricMetaMsg->numOfTags = htons(pMetricMetaMsg->numOfTags); + + pMetricMetaMsg->type = htons(pMetricMetaMsg->type); + pMetricMetaMsg->numOfGroupbyCols = htons(pMetricMetaMsg->numOfGroupbyCols); + + pMetricMetaMsg->groupbyTagIds = ((char *)pMetricMetaMsg->tags) + pMetricMetaMsg->condLength * TSDB_NCHAR_SIZE; + int16_t *groupbyColIds = (int16_t *)pMetricMetaMsg->groupbyTagIds; + for (int32_t i = 0; i < pMetricMetaMsg->numOfGroupbyCols; ++i) { + groupbyColIds[i] = htons(groupbyColIds[i]); + } + + for (int32_t i = 0; i < pMetricMetaMsg->numOfTags; ++i) { + pMetricMetaMsg->tagCols[i] = htons(pMetricMetaMsg->tagCols[i]); + } + + pMetricMetaMsg->limit = htobe64(pMetricMetaMsg->limit); + pMetricMetaMsg->offset = htobe64(pMetricMetaMsg->offset); + + msgLen = mgmtRetrieveMetricMeta(pConn->thandle, &pStart, pMetric, pMetricMetaMsg); + } + + taosSendMsgToPeer(pConn->thandle, pStart, msgLen); + + return msgLen; +} + +int mgmtProcessCreateDbMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SCreateDbMsg *pCreate = (SCreateDbMsg *)pMsg; + int code = 0; + + pCreate->maxSessions = htonl(pCreate->maxSessions); + pCreate->cacheBlockSize = htonl(pCreate->cacheBlockSize); + // pCreate->cacheNumOfBlocks = htonl(pCreate->cacheNumOfBlocks); + pCreate->daysPerFile = htonl(pCreate->daysPerFile); + pCreate->daysToKeep = htonl(pCreate->daysToKeep); + pCreate->daysToKeep1 = htonl(pCreate->daysToKeep1); + pCreate->daysToKeep2 = htonl(pCreate->daysToKeep2); + pCreate->commitTime = htonl(pCreate->commitTime); + pCreate->blocksPerMeter = htons(pCreate->blocksPerMeter); + pCreate->rowsInFileBlock = htonl(pCreate->rowsInFileBlock); + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + SAcctObj *pAcct = &acctObj; + code = mgmtCreateDb(pAcct, pCreate); + if (code == TSDB_CODE_SUCCESS) { + mLPrint("DB:%s is created by %s", pCreate->db, pConn->pUser->user); + } + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_DB_RSP, code); + + return 0; +} + +int mgmtProcessAlterDbMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SAlterDbMsg *pAlter = (SAlterDbMsg *)pMsg; + int code = 0; + + pAlter->daysPerFile = htonl(pAlter->daysPerFile); + pAlter->daysToKeep = htonl(pAlter->daysToKeep); + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + code = mgmtAlterDb(&acctObj, pAlter); + if (code == TSDB_CODE_SUCCESS) { + mLPrint("DB:%s is altered by %s", pAlter->db, pConn->pUser->user); + } + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_DB_RSP, code); + + return 0; +} + +int 
mgmtProcessKillQueryMsg(char *pMsg, int msgLen, SConnObj *pConn) { + int code = 0; + SKillQuery *pKill = (SKillQuery *)pMsg; + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + code = mgmtKillQuery(pKill->queryId, pConn); + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_KILL_QUERY_RSP, code); + + return 0; +} + +int mgmtProcessKillStreamMsg(char *pMsg, int msgLen, SConnObj *pConn) { + int code = 0; + SKillStream *pKill = (SKillStream *)pMsg; + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + code = mgmtKillStream(pKill->queryId, pConn); + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_KILL_STREAM_RSP, code); + + return 0; +} + +int mgmtProcessKillConnectionMsg(char *pMsg, int msgLen, SConnObj *pConn) { + int code = 0; + SKillConnection *pKill = (SKillConnection *)pMsg; + + if (!pConn->superAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + code = mgmtKillConnection(pKill->queryId, pConn); + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_KILL_CONNECTION_RSP, code); + + return 0; +} + +int mgmtProcessCreateUserMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SCreateUserMsg *pCreate = (SCreateUserMsg *)pMsg; + int code = 0; + + if (pConn->superAuth) { + code = mgmtCreateUser(&acctObj, pCreate->user, pCreate->pass); + if (code == TSDB_CODE_SUCCESS) { + mLPrint("user:%s is created by %s", pCreate->user, pConn->pUser->user); + } + } else { + code = TSDB_CODE_NO_RIGHTS; + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_USER_RSP, code); + + return 0; +} + +int mgmtProcessAlterUserMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SAlterUserMsg *pAlter = (SAlterUserMsg *)pMsg; + int code = 0; + SUserObj * pUser; + + pUser = mgmtGetUser(pAlter->user); + if (pUser == NULL) { + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, TSDB_CODE_INVALID_USER); + return 0; + } + + if (strcmp(pUser->user, "sys") == 0 || strcmp(pUser->user, "stream") == 0) { + code = TSDB_CODE_NO_RIGHTS; + } else if ((strcmp(pUser->user, pConn->pUser->user) == 0) || + ((strcmp(pUser->acct, acctObj.user) == 0) && pConn->superAuth) || + (strcmp(pConn->pUser->user, "root") == 0)) { + if ((pAlter->flag & TSDB_ALTER_USER_PASSWD) != 0) { + memset(pUser->pass, 0, sizeof(pUser->pass)); + strcpy(pUser->pass, pAlter->pass); + } + if ((pAlter->flag & TSDB_ALTER_USER_PRIVILEGES) != 0) { + if (pAlter->privilege == 1) { // super + pUser->superAuth = 1; + pUser->writeAuth = 1; + } + if (pAlter->privilege == 2) { // read + pUser->superAuth = 0; + pUser->writeAuth = 0; + } + if (pAlter->privilege == 3) { // write + pUser->superAuth = 0; + pUser->writeAuth = 1; + } + } + + code = mgmtUpdateUser(pUser); + mLPrint("user:%s is altered by %s", pAlter->user, pConn->pUser->user); + } else { + code = TSDB_CODE_NO_RIGHTS; + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_USER_RSP, code); + + return 0; +} + +int mgmtProcessDropUserMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SDropUserMsg *pDrop = (SDropUserMsg *)pMsg; + int code = 0; + + if (strcmp(pConn->pUser->user, pDrop->user) == 0) { + code = TSDB_CODE_NO_RIGHTS; + } else if (strcmp(pDrop->user, "sys") == 0 || strcmp(pDrop->user, "stream") == 0) { + code = TSDB_CODE_NO_RIGHTS; + } else { + if (pConn->superAuth) { + code = mgmtDropUser(&acctObj, pDrop->user); + if (code == 0) { + mLPrint("user:%s is dropped by %s", pDrop->user, pConn->pUser->user); + } + } else { + code = TSDB_CODE_NO_RIGHTS; + } + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_USER_RSP, code); + + return 0; +} + +int 
mgmtProcessDropDbMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SDropDbMsg *pDrop = (SDropDbMsg *)pMsg; + int code; + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + code = mgmtDropDbByName(&acctObj, pDrop->db); + if (code == 0) { + mLPrint("DB:%s is dropped by %s", pDrop->db, pConn->pUser->user); + } + } + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_DB_RSP, code); + + return 0; +} + +int mgmtProcessUseDbMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SUseDbMsg *pUse = (SUseDbMsg *)pMsg; + int code; + + code = mgmtUseDb(pConn, pUse->db); + if (code == 0) mTrace("DB is change to:%s by %s", pUse->db, pConn->pUser->user); + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_USE_DB_RSP, code); + + return 0; +} + +int (*mgmtGetMetaFp[])(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) = { + mgmtGetUserMeta, mgmtGetDbMeta, mgmtGetMeterMeta, mgmtGetDnodeMeta, mgmtGetVgroupMeta, + mgmtGetMetricMeta, mgmtGetQueryMeta, mgmtGetStreamMeta, mgmtGetConnsMeta, +}; + +int (*mgmtRetrieveFp[])(SShowObj *pShow, char *data, int rows, SConnObj *pConn) = { + mgmtRetrieveUsers, mgmtRetrieveDbs, mgmtRetrieveMeters, mgmtRetrieveDnodes, mgmtRetrieveVgroups, + mgmtRetrieveMetrics, mgmtRetrieveQueries, mgmtRetrieveStreams, mgmtRetrieveConns, +}; + +int mgmtProcessShowMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SShowMsg * pShowMsg = (SShowMsg *)pMsg; + STaosRsp * pRsp; + char * pStart; + int code = 0; + SShowRspMsg *pShowRsp; + SShowObj * pShow = NULL; + + int size = sizeof(STaosHeader) + sizeof(STaosRsp) + sizeof(SShowRspMsg) + sizeof(SSchema) * TSDB_MAX_COLUMNS + + TSDB_EXTRA_PAYLOAD_SIZE; + pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_SHOW_RSP, size); + if (pStart == NULL) return 0; + pMsg = pStart; + pRsp = (STaosRsp *)pMsg; + pMsg = (char *)pRsp->more; + + if (pShowMsg->type >= TSDB_MGMT_TABLE_MAX) { + code = -1; + } else { + pShow = (SShowObj *)calloc(1, sizeof(SShowObj) + htons(pShowMsg->payloadLen)); + pShow->signature = pShow; + pShow->type = pShowMsg->type; + mTrace("pShow:%p is allocated", pShow); + + // set the table name query condition + pShow->payloadLen = htons(pShowMsg->payloadLen); + memcpy(pShow->payload, pShowMsg->payload, pShow->payloadLen); + + pShowRsp = (SShowRspMsg *)pMsg; + pShowRsp->qhandle = (uint64_t)pShow; // qhandle; + pConn->qhandle = pShowRsp->qhandle; + + code = (*mgmtGetMetaFp[pShowMsg->type])(&pShowRsp->meterMeta, pShow, pConn); + if (code == 0) { + pMsg += sizeof(SShowRspMsg) + sizeof(SSchema) * pShow->numOfColumns; + } else { + mError("pShow:%p, failed to get Meta, code:%d", pShow, code); + free(pShow); + } + } + + pRsp->code = code; + msgLen = pMsg - pStart; + taosSendMsgToPeer(pConn->thandle, pStart, msgLen); + + return msgLen; +} + +int mgmtProcessRetrieveMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SRetrieveMeterMsg *pRetrieve; + SRetrieveMeterRsp *pRsp; + int rowsToRead = 0, size = 0, rowsRead = 0; + char * pStart; + int code = 0; + SShowObj * pShow; + + pRetrieve = (SRetrieveMeterMsg *)pMsg; + + /* + * in case of server restart, apps may hold qhandle created by server before restart, + * which is actually invalid, therefore, signature check is required. 
+ */ + if (pRetrieve->qhandle != pConn->qhandle) { + mError("retrieve:%p, qhandle:%p is not matched with saved:%p", pRetrieve, pRetrieve->qhandle, pConn->qhandle); + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, TSDB_CODE_MEMORY_CORRUPTED); + return -1; + } + + pShow = (SShowObj *)pRetrieve->qhandle; + if (pShow->signature != (void *)pShow) { + mError("pShow:%p, signature:%p, query memory is corrupted", pShow, pShow->signature); + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, TSDB_CODE_MEMORY_CORRUPTED); + return -1; + } else { + if (pRetrieve->free == 0) rowsToRead = pShow->numOfRows - pShow->numOfReads; + + /* return no more than 100 meters in one round trip */ + if (rowsToRead > 100) rowsToRead = 100; + + /* + * the actual number of table may be larger than the value of pShow->numOfRows, if a query is + * issued during a continuous create table operation. Therefore, rowToRead may be less than 0. + */ + if (rowsToRead < 0) rowsToRead = 0; + size = pShow->rowSize * rowsToRead; + } + + pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, size + 100); + if (pStart == NULL) return 0; + pMsg = pStart; + + STaosRsp *pTaosRsp = (STaosRsp *)pStart; + pTaosRsp->code = code; + pMsg = pTaosRsp->more; + + if (code == 0) { + pRsp = (SRetrieveMeterRsp *)pMsg; + pMsg = pRsp->data; + + // if free flag is set, client wants to clean the resources + if (pRetrieve->free == 0) rowsRead = (*mgmtRetrieveFp[pShow->type])(pShow, pRsp->data, rowsToRead, pConn); + + if (rowsRead < 0) { + rowsRead = 0; + pTaosRsp->code = TSDB_CODE_ACTION_IN_PROGRESS; + } + + pRsp->numOfRows = htonl(rowsRead); + pRsp->precision = htonl(TSDB_TIME_PRECISION_MILLI); // millisecond time precision + pMsg += size; + } + + msgLen = pMsg - pStart; + taosSendMsgToPeer(pConn->thandle, pStart, msgLen); + + if (rowsToRead == 0) { + int64_t oldSign = __sync_val_compare_and_swap(&pShow->signature, (uint64_t)pShow, 0); + if (oldSign != (uint64_t)pShow) { + return msgLen; + } + // pShow->signature = 0; + mTrace("pShow:%p is released", pShow); + tfree(pShow); + } + + return msgLen; +} + +int mgmtProcessCreateTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SCreateTableMsg *pCreate = (SCreateTableMsg *)pMsg; + int code; + SSchema * pSchema; + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + pCreate->numOfColumns = htons(pCreate->numOfColumns); + pCreate->numOfTags = htons(pCreate->numOfTags); + + pCreate->sqlLen = htons(pCreate->sqlLen); + pSchema = pCreate->schema; + for (int i = 0; i < pCreate->numOfColumns + pCreate->numOfTags; ++i) { + pSchema->bytes = htons(pSchema->bytes); + pSchema->colId = i; + pSchema++; + } + + if (pConn->pDb) { + code = mgmtCreateMeter(pConn->pDb, pCreate); + if (code == 0) { + mTrace("meter:%s is created by %s", pCreate->meterId, pConn->pUser->user); + // mLPrint("meter:%s is created by %s", pCreate->meterId, pConn->pUser->user); + } + } else { + code = TSDB_CODE_DB_NOT_SELECTED; + } + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_TABLE_RSP, code); + + return 0; +} + +int mgmtProcessDropTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SDropTableMsg *pDrop = (SDropTableMsg *)pMsg; + int code; + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + code = mgmtDropMeter(pConn->pDb, pDrop->meterId, pDrop->igNotExists); + if (code == 0) { + mTrace("meter:%s is dropped by user:%s", pDrop->meterId, pConn->pUser->user); + // mLPrint("meter:%s is dropped by user:%s", pDrop->meterId, pConn->pUser->user); + } + + 
taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_TABLE_RSP, code); + } + + return 0; +} + +int mgmtProcessAlterTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { + SAlterTableMsg *pAlter = (SAlterTableMsg *)pMsg; + int code; + + if (!pConn->writeAuth) { + code = TSDB_CODE_NO_RIGHTS; + } else { + pAlter->type = htons(pAlter->type); + pAlter->numOfCols = htons(pAlter->numOfCols); + + if (pAlter->numOfCols > 2) { + mError("meter:%s error numOfCols:%d in alter table", pAlter->meterId, pAlter->numOfCols); + code = TSDB_CODE_APP_ERROR; + } else { + if (pConn->pDb) { + for (int32_t i = 0; i < pAlter->numOfCols; ++i) { + pAlter->schema[i].bytes = htons(pAlter->schema[i].bytes); + } + + code = mgmtAlterMeter(pConn->pDb, pAlter); + if (code == 0) { + mLPrint("meter:%s is altered by %s", pAlter->meterId, pConn->pUser->user); + } + } else { + code = TSDB_CODE_DB_NOT_SELECTED; + } + } + } + + taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_ALTER_TABLE_RSP, code); + + return 0; +} + +int mgmtProcessHeartBeatMsg(char *cont, int contLen, SConnObj *pConn) { + char * pStart, *pMsg; + int msgLen; + STaosRsp *pRsp; + + mgmtSaveQueryStreamList(cont, contLen, pConn); + + pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_HEARTBEAT_RSP, 128); + if (pStart == NULL) return 0; + pMsg = pStart; + pRsp = (STaosRsp *)pMsg; + pRsp->code = 0; + pMsg = (char *)pRsp->more; + + SHeartBeatRsp *pHBRsp = (SHeartBeatRsp *)pRsp->more; + pHBRsp->queryId = pConn->queryId; + pConn->queryId = 0; + pHBRsp->streamId = pConn->streamId; + pHBRsp->streamId = pConn->streamId; + pConn->streamId = 0; + pHBRsp->killConnection = pConn->killConnection; + + pMsg += sizeof(SHeartBeatRsp); + + msgLen = pMsg - pStart; + + taosSendMsgToPeer(pConn->thandle, pStart, msgLen); + + return 0; +} + +void mgmtEstablishConn(SConnObj *pConn) { + __sync_fetch_and_add(&mgmtShellConns, 1); + pConn->stime = taosGetTimestampMs(); + + if (strcmp(pConn->pUser->user, "root") == 0 || strcmp(pConn->pUser->user, acctObj.user) == 0) { + pConn->superAuth = 1; + pConn->writeAuth = 1; + } else { + pConn->superAuth = pConn->pUser->superAuth; + pConn->writeAuth = pConn->pUser->writeAuth; + if (pConn->superAuth) { + pConn->writeAuth = 1; + } + } + + uint32_t temp; + taosGetRpcConnInfo(pConn->thandle, &temp, &pConn->ip, &pConn->port, &temp, &temp); + mgmtAddConnIntoAcct(pConn); +} + +int mgmtRetriveUserAuthInfo(char *user, char *spi, char *encrypt, uint8_t *secret, uint8_t *ckey) { + SUserObj *pUser = NULL; + + *spi = 0; + *encrypt = 0; + secret[0] = 0; + ckey[0] = 0; + + pUser = mgmtGetUser(user); + if (pUser == NULL) return TSDB_CODE_INVALID_USER; + + *spi = 1; + *encrypt = 0; + memcpy(secret, pUser->pass, TSDB_KEY_LEN); + + return 0; +} + +int mgmtProcessConnectMsg(char *pMsg, int msgLen, SConnObj *pConn) { + STaosRsp * pRsp; + SConnectRsp *pConnectRsp; + SConnectMsg *pConnectMsg; + char * pStart; + int code = TSDB_CODE_INVALID_USER; + SAcctObj * pAcct = NULL; + SUserObj * pUser = NULL; + SDbObj * pDb = NULL; + char dbName[TSDB_METER_ID_LEN]; + + pConnectMsg = (SConnectMsg *)pMsg; + + pUser = mgmtGetUser(pConn->user); + if (pUser == NULL) { + code = TSDB_CODE_INVALID_USER; + goto _rsp; + } + + pAcct = &acctObj; + + if (pConnectMsg->db[0]) { + memset(dbName, 0, sizeof(dbName)); + sprintf(dbName, "%x%s%s", pAcct->acctId, TS_PATH_DELIMITER, pConnectMsg->db); + pDb = mgmtGetDb(dbName); + if (pDb == NULL) { + code = TSDB_CODE_INVALID_DB; + goto _rsp; + } + strcpy(pConn->db, dbName); + } + + if (pConn->pAcct) { + mgmtRemoveConnFromAcct(pConn); + 
__sync_fetch_and_sub(&mgmtShellConns, 1); + } + + code = 0; + pConn->pAcct = pAcct; + pConn->pDb = pDb; + pConn->pUser = pUser; + mgmtEstablishConn(pConn); + +_rsp: + pStart = taosBuildRspMsgWithSize(pConn->thandle, TSDB_MSG_TYPE_CONNECT_RSP, 128); + if (pStart == NULL) return 0; + + pMsg = pStart; + pRsp = (STaosRsp *)pMsg; + pRsp->code = code; + pMsg += sizeof(STaosRsp); + + if (code == 0) { + pConnectRsp = (SConnectRsp *)pRsp->more; + sprintf(pConnectRsp->acctId, "%x", pConn->pAcct->acctId); + strcpy(pConnectRsp->version, version); + pConnectRsp->writeAuth = pConn->writeAuth; + pConnectRsp->superAuth = pConn->superAuth; + pMsg += sizeof(SConnectRsp); + + // set the time resolution: millisecond or microsecond + *((uint32_t *)pMsg) = tsTimePrecision; + pMsg += sizeof(uint32_t); + + } else { + pConn->pAcct = NULL; + pConn->pUser = NULL; + } + + msgLen = pMsg - pStart; + taosSendMsgToPeer(pConn->thandle, pStart, msgLen); + + char ipstr[24]; + tinet_ntoa(ipstr, pConn->ip); + mLPrint("user:%s login from %s, code:%d", pConn->user, ipstr, code); + + return code; +} + +void *mgmtProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { + SIntMsg * pMsg = (SIntMsg *)msg; + SConnObj *pConn = (SConnObj *)ahandle; + + if (msg == NULL) { + if (pConn) { + mgmtRemoveConnFromAcct(pConn); + __sync_fetch_and_sub(&mgmtShellConns, 1); + mTrace("connection from %s is closed", pConn->pUser->user); + memset(pConn, 0, sizeof(SConnObj)); + } + + return NULL; + } + + if (pConn == NULL) { + pConn = connList + pMsg->destId; + pConn->thandle = thandle; + strcpy(pConn->user, pMsg->meterId); + } + + if (pMsg->msgType == TSDB_MSG_TYPE_CONNECT) { + (*mgmtProcessShellMsg[pMsg->msgType])((char *)pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pConn); + } else { + SMgmtHead *pHead = (SMgmtHead *)pMsg->content; + if (pConn->pAcct == NULL) { + pConn->pUser = mgmtGetUser(pConn->user); + if (pConn->pUser) { + pConn->pAcct = &acctObj; + mgmtEstablishConn(pConn); + mTrace("login from:%x:%d", pConn->ip, htons(pConn->port)); + } + } + + if (pConn->pAcct) { + if (strcmp(pConn->db, pHead->db) != 0) pConn->pDb = mgmtGetDb(pHead->db); + + char *cont = (char *)pMsg->content + sizeof(SMgmtHead); + int contLen = pMsg->msgLen - sizeof(SIntMsg) - sizeof(SMgmtHead); + if (pMsg->msgType == TSDB_MSG_TYPE_METERINFO || pMsg->msgType == TSDB_MSG_TYPE_METRIC_META || + pMsg->msgType == TSDB_MSG_TYPE_RETRIEVE || pMsg->msgType == TSDB_MSG_TYPE_SHOW) { + (*mgmtProcessShellMsg[pMsg->msgType])(cont, contLen, pConn); + } else { + if (mgmtProcessShellMsg[pMsg->msgType]) { + // TODO : put the msg in tran queue + SSchedMsg schedMsg; + schedMsg.msg = malloc(pMsg->msgLen); // Message to deal with + memcpy(schedMsg.msg, pMsg, pMsg->msgLen); + + schedMsg.fp = mgmtProcessTranRequest; + schedMsg.tfp = NULL; + schedMsg.thandle = pConn; + + taosScheduleTask(mgmtTranQhandle, &schedMsg); + } else { + mError("%s from shell is not processed", taosMsg[pMsg->msgType]); + } + } + } else { + taosSendSimpleRsp(thandle, pMsg->msgType + 1, TSDB_CODE_DISCONNECTED); + } + } + + if (pConn->pAcct == NULL) { + taosCloseRpcConn(pConn->thandle); + memset(pConn, 0, sizeof(SConnObj)); // close the connection; + pConn = NULL; + } + + return pConn; +} + +void mgmtInitProcessShellMsg() { + mgmtProcessShellMsg[TSDB_MSG_TYPE_METERINFO] = mgmtProcessMeterMetaMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_METRIC_META] = mgmtProcessMetricMetaMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_DB] = mgmtProcessCreateDbMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_ALTER_DB] = mgmtProcessAlterDbMsg; + 
mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_USER] = mgmtProcessCreateUserMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_ALTER_USER] = mgmtProcessAlterUserMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_DB] = mgmtProcessDropDbMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_USER] = mgmtProcessDropUserMsg; + + mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_TABLE] = mgmtProcessCreateTableMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_TABLE] = mgmtProcessDropTableMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_ALTER_TABLE] = mgmtProcessAlterTableMsg; + + mgmtProcessShellMsg[TSDB_MSG_TYPE_USE_DB] = mgmtProcessUseDbMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_RETRIEVE] = mgmtProcessRetrieveMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_SHOW] = mgmtProcessShowMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_CONNECT] = mgmtProcessConnectMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_HEARTBEAT] = mgmtProcessHeartBeatMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_KILL_QUERY] = mgmtProcessKillQueryMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_KILL_STREAM] = mgmtProcessKillStreamMsg; + mgmtProcessShellMsg[TSDB_MSG_TYPE_KILL_CONNECTION] = mgmtProcessKillConnectionMsg; +} diff --git a/src/system/src/mgmtSystem.c b/src/system/src/mgmtSystem.c new file mode 100644 index 000000000000..7705c1cf91d4 --- /dev/null +++ b/src/system/src/mgmtSystem.c @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dnodeSystem.h" +#include "mgmt.h" +#include "tsdb.h" +#include "tsystem.h" +#include "vnode.h" + +// global, not configurable +char mgmtDirectory[128]; +void * mgmtTmr; +void * mgmtQhandle; +void * mgmtTranQhandle = NULL; +void * mgmtStatisticTimer = NULL; +void * mgmtStatusTimer = NULL; +int mgmtShellConns = 0; +extern void *pShellConn; +extern void *rpcQhandle; + +void mgmtCleanUpSystem() { + mTrace("mgmt is running, clean it up"); + taosTmrStopA(&mgmtStatisticTimer); + mgmtCleanUpShell(); + mgmtCleanUpMeters(); + mgmtCleanUpVgroups(); + mgmtCleanUpDbs(); + mgmtCleanUpUsers(); + taosTmrCleanUp(mgmtTmr); + taosCleanUpScheduler(mgmtQhandle); + taosCleanUpScheduler(mgmtTranQhandle); +} + +void mgmtDoStatistic(void *handle, void *tmrId) {} + +void mgmtProcessDnodeStatus(void *handle, void *tmrId) { + SDnodeObj *pObj = &dnodeObj; + pObj->openVnodes = tsOpenVnodes; + pObj->status = TSDB_STATUS_READY; + + float memoryUsedMB = 0; + taosGetSysMemory(&memoryUsedMB); + pObj->memoryAvailable = tsTotalMemoryMB - memoryUsedMB; + + float diskUsedGB = 0; + taosGetDisk(&diskUsedGB); + pObj->diskAvailable = tsTotalDiskGB - diskUsedGB; + + for (int vnode = 0; vnode < pObj->numOfVnodes; ++vnode) { + SVnodeLoad *pVload = &(pObj->vload[vnode]); + SVnodeObj * pVnode = vnodeList + vnode; + + // wait vnode dropped + if (pVload->dropStatus == TSDB_VN_STATUS_DROPPING) { + if (vnodeList[vnode].cfg.maxSessions <= 0) { + pVload->dropStatus = TSDB_VN_STATUS_READY; + pVload->status = TSDB_VN_STATUS_READY; + mPrint("vid:%d, drop finished", pObj->privateIp, vnode); + taosTmrStart(mgmtMonitorDbDrop, 10000, NULL, mgmtTmr); + } + } + + if (vnodeList[vnode].cfg.maxSessions <= 0) { + continue; + } + + pVload->vnode = vnode; + pVload->status = TSDB_VN_STATUS_READY; + pVload->totalStorage = pVnode->vnodeStatistic.totalStorage; + pVload->compStorage = pVnode->vnodeStatistic.compStorage; + pVload->pointsWritten = pVnode->vnodeStatistic.pointsWritten; + uint32_t vgId = pVnode->cfg.vgId; + + SVgObj *pVgroup = mgmtGetVgroup(vgId); + if (pVgroup == NULL) { + mError("vgroup:%d is not there, but associated with vnode %d", vgId, vnode); + pVload->dropStatus = TSDB_VN_STATUS_DROPPING; + continue; + } + + SDbObj *pDb = mgmtGetDb(pVgroup->dbName); + if (pDb == NULL) { + mError("vgroup:%d not belongs to any database, vnode:%d", vgId, vnode); + continue; + } + + if (pVload->vgId == 0 || pVload->dropStatus == TSDB_VN_STATUS_DROPPING) { + mError("vid:%d, mgmt not exist, drop it", vnode); + pVload->dropStatus = TSDB_VN_STATUS_DROPPING; + } + } + + taosTmrReset(mgmtProcessDnodeStatus, tsStatusInterval * 1000, NULL, mgmtTmr, &mgmtStatusTimer); + if (mgmtStatusTimer == NULL) { + mError("Failed to start status timer"); + } +} + +int mgmtInitSystem() { + mPrint("starting to initialize TDengine mgmt ..."); + + struct stat dirstat; + + if (stat(mgmtDirectory, &dirstat) < 0) mkdir(mgmtDirectory, 0755); + + int numOfThreads = tsNumOfCores * tsNumOfThreadsPerCore / 2.0; + if (numOfThreads < 1) numOfThreads = 1; + mgmtQhandle = taosInitScheduler(tsMaxDnodes + tsMaxShellConns, numOfThreads, "mnode"); + + mgmtTranQhandle = taosInitScheduler(tsMaxDnodes + tsMaxShellConns, 1, "mnodeT"); + + mgmtTmr = taosTmrInit((tsMaxDnodes + tsMaxShellConns) * 3, 200, 3600000, "MND"); + if (mgmtTmr == NULL) { + mError("failed to init timer, exit"); + return -1; + } + + dnodeObj.lastReboot = tsRebootTime; + dnodeObj.numOfCores = (uint16_t)tsNumOfCores; + if 
(dnodeObj.numOfVnodes == TSDB_INVALID_VNODE_NUM) { + mgmtSetDnodeMaxVnodes(&dnodeObj); + mPrint("first access, set total vnodes:%d", dnodeObj.numOfVnodes); + } + + if (mgmtInitUsers() < 0) { + mError("failed to init users"); + return -1; + } + + if (mgmtInitDbs() < 0) { + mError("failed to init dbs"); + return -1; + } + + if (mgmtInitVgroups() < 0) { + mError("failed to init vgroups"); + return -1; + } + + if (mgmtInitMeters() < 0) { + mError("failed to init meters"); + return -1; + } + + if (mgmtInitShell() < 0) { + mError("failed to init shell"); + return -1; + } + + mgmtCheckAcct(); + + taosTmrReset(mgmtDoStatistic, tsStatusInterval * 30000, NULL, mgmtTmr, &mgmtStatisticTimer); + + taosTmrReset(mgmtProcessDnodeStatus, 500, NULL, mgmtTmr, &mgmtStatusTimer); + + mPrint("TDengine mgmt is initialized successfully"); + + return 0; +} diff --git a/src/system/src/mgmtUser.c b/src/system/src/mgmtUser.c new file mode 100644 index 000000000000..437177291b65 --- /dev/null +++ b/src/system/src/mgmtUser.c @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include + +#include "mgmt.h" +#include "tschemautil.h" + +void *userSdb = NULL; +int tsUserUpdateSize; + +void *(*mgmtUserActionFp[SDB_MAX_ACTION_TYPES])(void *row, char *str, int size, int *ssize); +void *mgmtUserActionInsert(void *row, char *str, int size, int *ssize); +void *mgmtUserActionDelete(void *row, char *str, int size, int *ssize); +void *mgmtUserActionUpdate(void *row, char *str, int size, int *ssize); +void *mgmtUserActionEncode(void *row, char *str, int size, int *ssize); +void *mgmtUserActionDecode(void *row, char *str, int size, int *ssize); +void *mgmtUserActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtUserActionBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtUserActionAfterBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtUserActionReset(void *row, char *str, int size, int *ssize); +void *mgmtUserActionDestroy(void *row, char *str, int size, int *ssize); + +void mgmtUserActionInit() { + mgmtUserActionFp[SDB_TYPE_INSERT] = mgmtUserActionInsert; + mgmtUserActionFp[SDB_TYPE_DELETE] = mgmtUserActionDelete; + mgmtUserActionFp[SDB_TYPE_UPDATE] = mgmtUserActionUpdate; + mgmtUserActionFp[SDB_TYPE_ENCODE] = mgmtUserActionEncode; + mgmtUserActionFp[SDB_TYPE_DECODE] = mgmtUserActionDecode; + mgmtUserActionFp[SDB_TYPE_BEFORE_BATCH_UPDATE] = mgmtUserActionBeforeBatchUpdate; + mgmtUserActionFp[SDB_TYPE_BATCH_UPDATE] = mgmtUserActionBatchUpdate; + mgmtUserActionFp[SDB_TYPE_AFTER_BATCH_UPDATE] = mgmtUserActionAfterBatchUpdate; + mgmtUserActionFp[SDB_TYPE_RESET] = mgmtUserActionReset; + mgmtUserActionFp[SDB_TYPE_DESTROY] = mgmtUserActionDestroy; +} + +void *mgmtUserAction(char action, void *row, char *str, int size, int *ssize) { + if (mgmtUserActionFp[action] != NULL) { + return (*(mgmtUserActionFp[action]))(row, str, size, ssize); + } + return NULL; +} + +int mgmtInitUsers() { + void * pNode = NULL; + SUserObj *pUser = NULL; 
+ SAcctObj *pAcct = NULL; + int numOfUsers = 0; + + mgmtUserActionInit(); + + userSdb = sdbOpenTable(tsMaxUsers, sizeof(SUserObj), "user", SDB_KEYTYPE_STRING, mgmtDirectory, mgmtUserAction); + if (userSdb == NULL) { + mError("failed to init user data"); + return -1; + } + + while (1) { + pNode = sdbFetchRow(userSdb, pNode, (void **)&pUser); + if (pUser == NULL) break; + + pUser->prev = NULL; + pUser->next = NULL; + + pAcct = &acctObj; + mgmtAddUserIntoAcct(pAcct, pUser); + + numOfUsers++; + } + + SUserObj tObj; + tsUserUpdateSize = tObj.updateEnd - (char *)&tObj; + + mTrace("user data is initialized"); + return 0; +} + +SUserObj *mgmtGetUser(char *name) { return (SUserObj *)sdbGetRow(userSdb, name); } + +int mgmtUpdateUser(SUserObj *pUser) { return sdbUpdateRow(userSdb, pUser, 0, 1); } + +int mgmtCreateUser(SAcctObj *pAcct, char *name, char *pass) { + SUserObj *pUser; + + int numOfUsers = sdbGetNumOfRows(userSdb); + if (numOfUsers >= tsMaxUsers) { + mWarn("numOfUsers:%d, exceed tsMaxUsers:%d", numOfUsers, tsMaxUsers); + return TSDB_CODE_TOO_MANY_USERS; + } + + pUser = (SUserObj *)sdbGetRow(userSdb, name); + if (pUser != NULL) { + mWarn("user:%s is already there", name); + return TSDB_CODE_USER_ALREADY_EXIST; + } + + pUser = malloc(sizeof(SUserObj)); + memset(pUser, 0, sizeof(SUserObj)); + strcpy(pUser->user, name); + strcpy(pUser->pass, pass); + strcpy(pUser->acct, pAcct->user); + pUser->createdTime = taosGetTimestampMs(); + pUser->superAuth = 0; + pUser->writeAuth = 1; + if (strcmp(pUser->user, "root") == 0 || strcmp(pUser->user, pUser->acct) == 0) { + pUser->superAuth = 1; + } + + int code = TSDB_CODE_SUCCESS; + if (sdbInsertRow(userSdb, pUser, 0) < 0) { + tfree(pUser); + code = TSDB_CODE_SDB_ERROR; + } + + // mgmtAddUserIntoAcct(pAcct, pUser); + + return code; +} + +int mgmtDropUser(SAcctObj *pAcct, char *name) { + SUserObj *pUser; + + pUser = (SUserObj *)sdbGetRow(userSdb, name); + if (pUser == NULL) { + mWarn("user:%s is not there", name); + return TSDB_CODE_INVALID_USER; + } + + if (strcmp(pAcct->user, pUser->acct) != 0) return TSDB_CODE_NO_RIGHTS; + + // mgmtRemoveUserFromAcct(pAcct, pUser); + sdbDeleteRow(userSdb, pUser); + + return 0; +} + +void mgmtCleanUpUsers() { sdbCloseTable(userSdb); } + +int mgmtGetUserMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = TSDB_USER_LEN; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "name"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 6; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "privilege"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "created time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + // pShow->numOfRows = sdbGetNumOfRows (userSdb); + pShow->numOfRows = pConn->pAcct->acctInfo.numOfUsers; + pShow->pNode = pConn->pAcct->pUser; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + return 0; +} + +int mgmtRetrieveUsers(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + SUserObj *pUser = NULL; + char * pWrite; + int cols = 0; + + while (numOfRows < rows) { + // 
pShow->pNode = sdbFetchRow(userSdb, pShow->pNode, (void **)&pUser); + pUser = (SUserObj *)pShow->pNode; + if (pUser == NULL) break; + pShow->pNode = (void *)pUser->next; + + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + strcpy(pWrite, pUser->user); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + if (pUser->superAuth) { + strcpy(pWrite, "super"); + } else if (pUser->writeAuth) { + strcpy(pWrite, "write"); + } else { + strcpy(pWrite, "read"); + } + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pUser->createdTime; + cols++; + + numOfRows++; + } + pShow->numOfReads += numOfRows; + return numOfRows; +} + +void *mgmtUserActionInsert(void *row, char *str, int size, int *ssize) { + SUserObj *pUser = (SUserObj *)row; + SAcctObj *pAcct = &acctObj; + mgmtAddUserIntoAcct(pAcct, pUser); + + return NULL; +} +void *mgmtUserActionDelete(void *row, char *str, int size, int *ssize) { + SUserObj *pUser = (SUserObj *)row; + SAcctObj *pAcct = &acctObj; + mgmtRemoveUserFromAcct(pAcct, pUser); + + return NULL; +} +void *mgmtUserActionUpdate(void *row, char *str, int size, int *ssize) { + return mgmtUserActionReset(row, str, size, ssize); +} +void *mgmtUserActionEncode(void *row, char *str, int size, int *ssize) { + SUserObj *pUser = (SUserObj *)row; + int tsize = pUser->updateEnd - (char *)pUser; + if (size < tsize) { + *ssize = -1; + } else { + memcpy(str, pUser, tsize); + *ssize = tsize; + } + return NULL; +} +void *mgmtUserActionDecode(void *row, char *str, int size, int *ssize) { + SUserObj *pUser = (SUserObj *)malloc(sizeof(SUserObj)); + if (pUser == NULL) return NULL; + memset(pUser, 0, sizeof(SUserObj)); + + int tsize = pUser->updateEnd - (char *)pUser; + memcpy(pUser, str, tsize); + return (void *)pUser; +} +void *mgmtUserActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtUserActionBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtUserActionAfterBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtUserActionReset(void *row, char *str, int size, int *ssize) { + SUserObj *pUser = (SUserObj *)row; + int tsize = pUser->updateEnd - (char *)pUser; + memcpy(pUser, str, tsize); + + return NULL; +} + +void *mgmtUserActionDestroy(void *row, char *str, int size, int *ssize) { + tfree(row); + return NULL; +} diff --git a/src/system/src/mgmtUtil.c b/src/system/src/mgmtUtil.c new file mode 100644 index 000000000000..318e95db2c80 --- /dev/null +++ b/src/system/src/mgmtUtil.c @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +#include "mgmt.h" + +extern int cksumsize; + +uint64_t mgmtGetCheckSum(FILE* fp, int offset) { + uint64_t checksum = 0; + uint64_t data; + int bytes; + + while (1) { + data = 0; + bytes = fread(&data, sizeof(data), 1, fp); + + if (bytes != sizeof(data)) break; + + checksum += data; + } + + return checksum; +} + +bool mgmtMeterCreateFromMetric(STabObj* pMeterObj) { return pMeterObj->meterType == TSDB_METER_MTABLE; } + +bool mgmtIsMetric(STabObj* pMeterObj) { return pMeterObj->meterType == TSDB_METER_METRIC; } + +bool mgmtIsNormalMeter(STabObj* pMeterObj) { return !mgmtIsMetric(pMeterObj); } diff --git a/src/system/src/mgmtVgroup.c b/src/system/src/mgmtVgroup.c new file mode 100644 index 000000000000..357715306757 --- /dev/null +++ b/src/system/src/mgmtVgroup.c @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include + +#include "mgmt.h" +#include "tschemautil.h" + +void * vgSdb = NULL; +int tsVgUpdateSize; +extern void *dbSdb; +extern void *acctSdb; +extern void *userSdb; +extern void *dnodeSdb; + +void *(*mgmtVgroupActionFp[SDB_MAX_ACTION_TYPES])(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionInsert(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionDelete(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionUpdate(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionEncode(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionDecode(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionAfterBatchUpdate(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionReset(void *row, char *str, int size, int *ssize); +void *mgmtVgroupActionDestroy(void *row, char *str, int size, int *ssize); + +void mgmtVgroupActionInit() { + mgmtVgroupActionFp[SDB_TYPE_INSERT] = mgmtVgroupActionInsert; + mgmtVgroupActionFp[SDB_TYPE_DELETE] = mgmtVgroupActionDelete; + mgmtVgroupActionFp[SDB_TYPE_UPDATE] = mgmtVgroupActionUpdate; + mgmtVgroupActionFp[SDB_TYPE_ENCODE] = mgmtVgroupActionEncode; + mgmtVgroupActionFp[SDB_TYPE_DECODE] = mgmtVgroupActionDecode; + mgmtVgroupActionFp[SDB_TYPE_BEFORE_BATCH_UPDATE] = mgmtVgroupActionBeforeBatchUpdate; + mgmtVgroupActionFp[SDB_TYPE_BATCH_UPDATE] = mgmtVgroupActionBatchUpdate; + mgmtVgroupActionFp[SDB_TYPE_AFTER_BATCH_UPDATE] = mgmtVgroupActionAfterBatchUpdate; + mgmtVgroupActionFp[SDB_TYPE_RESET] = mgmtVgroupActionReset; + mgmtVgroupActionFp[SDB_TYPE_DESTROY] = mgmtVgroupActionDestroy; +} + +void *mgmtVgroupAction(char action, void *row, char *str, int size, int *ssize) { + if (mgmtVgroupActionFp[action] != NULL) { + return (*(mgmtVgroupActionFp[action]))(row, str, size, ssize); + } + return NULL; +} + +int mgmtInitVgroups() { + void * pNode = NULL; + SVgObj *pVgroup = NULL; + + mgmtVgroupActionInit(); + + vgSdb = 
sdbOpenTable(tsMaxVGroups, sizeof(SVgObj), "vgroups", SDB_KEYTYPE_AUTO, mgmtDirectory, mgmtVgroupAction); + if (vgSdb == NULL) { + mError("failed to init vgroup data"); + return -1; + } + + while (1) { + pNode = sdbFetchRow(vgSdb, pNode, (void **)&pVgroup); + if (pVgroup == NULL) break; + + SDbObj *pDb = mgmtGetDb(pVgroup->dbName); + if (pDb == NULL) continue; + + pVgroup->prev = NULL; + pVgroup->next = NULL; + int size = sizeof(STabObj *) * pDb->cfg.maxSessions; + pVgroup->meterList = (STabObj **)malloc(size); + memset(pVgroup->meterList, 0, size); + + pVgroup->idPool = taosInitIdPool(pDb->cfg.maxSessions); + taosIdPoolReinit(pVgroup->idPool); + + mgmtSetDnodeVgid(pVgroup->vnodeGid[0].vnode, pVgroup->vgId); + } + + SVgObj tObj; + tsVgUpdateSize = tObj.updateEnd - (char *)&tObj; + + mTrace("vgroup is initialized"); + return 0; +} + +SVgObj *mgmtGetVgroup(int vgId) { return (SVgObj *)sdbGetRow(vgSdb, &vgId); } + +void mgmtProcessVgTimer(void *handle, void *tmrId) { + SDbObj *pDb = (SDbObj *)handle; + if (pDb == NULL) return; + + if (pDb->vgStatus > TSDB_VG_STATUS_IN_PROGRESS) { + mTrace("db:%s, set vgstatus from %d to %d", pDb->name, pDb->vgStatus, TSDB_VG_STATUS_READY); + pDb->vgStatus = TSDB_VG_STATUS_READY; + } + + pDb->vgTimer = NULL; +} + +bool mgmtAllocateVnode(SVgObj *pVgroup) { + int selectedVnode = -1; + SDnodeObj *pDnode = &dnodeObj; + + for (int i = 0; i < pDnode->numOfVnodes; i++) { + int vnode = (i + pDnode->lastAllocVnode) % pDnode->numOfVnodes; + if (pDnode->vload[vnode].vgId == 0 && pDnode->vload[vnode].status == TSDB_VN_STATUS_READY) { + selectedVnode = vnode; + break; + } + } + + if (selectedVnode == -1) { + mError("vgroup:%d alloc vnode failed, free vnodes:%d", pVgroup->vgId, pDnode->numOfFreeVnodes); + return false; + } else { + mTrace("vgroup:%d allocate vnode:%d, last allocated vnode:%d", pVgroup->vgId, selectedVnode, + pDnode->lastAllocVnode); + pVgroup->vnodeGid[0].vnode = selectedVnode; + pDnode->lastAllocVnode = selectedVnode + 1; + if (pDnode->lastAllocVnode >= pDnode->numOfVnodes) pDnode->lastAllocVnode = 0; + return true; + } +} + +SVgObj *mgmtCreateVgroup(SDbObj *pDb) { + SVgObj *pVgroup; + int size; + + size = sizeof(SVgObj); + pVgroup = (SVgObj *)malloc(size); + memset(pVgroup, 0, size); + strcpy(pVgroup->dbName, pDb->name); + pVgroup->numOfVnodes = 1; + pVgroup->createdTime = taosGetTimestampMs(); + + if (!mgmtAllocateVnode(pVgroup)) { + mWarn("no enough free dnode"); + free(pVgroup); + pDb->vgStatus = TSDB_VG_STATUS_FULL; + taosTmrReset(mgmtProcessVgTimer, 5000, pDb, mgmtTmr, &pDb->vgTimer); + return NULL; + } + + sdbInsertRow(vgSdb, pVgroup, 0); + + mTrace("vgroup:%d vnode:%d db:%s is created", pVgroup->vgId, pVgroup->vnodeGid[0].vnode, pDb->name); + + mgmtSendVPeersMsg(pVgroup, pDb); + + return pVgroup; +} + +int mgmtDropVgroup(SDbObj *pDb, SVgObj *pVgroup) { + STabObj *pMeter; + + if (pVgroup->numOfMeters > 0) { + for (int i = 0; i < pDb->cfg.maxSessions; ++i) { + if (pVgroup->meterList != NULL) { + pMeter = pVgroup->meterList[i]; + if (pMeter) mgmtDropMeter(pDb, pMeter->meterId, 0); + } + } + } + + mgmtSendFreeVnodeMsg(pVgroup->vnodeGid[0].vnode); + sdbDeleteRow(vgSdb, pVgroup); + + return 0; +} + +void mgmtSetVgroupIdPool() { + void * pNode = NULL; + SVgObj *pVgroup = NULL; + SDbObj *pDb; + + while (1) { + pNode = sdbFetchRow(vgSdb, pNode, (void **)&pVgroup); + if (pVgroup == NULL || pVgroup->idPool == 0) break; + + taosIdPoolSetFreeList(pVgroup->idPool); + pVgroup->numOfMeters = taosIdPoolNumOfUsed(pVgroup->idPool); + + pDb = 
mgmtGetDb(pVgroup->dbName); + pDb->numOfTables += pVgroup->numOfMeters; + if (pVgroup->numOfMeters >= pDb->cfg.maxSessions - 1) + mgmtAddVgroupIntoDbTail(pDb, pVgroup); + else + mgmtAddVgroupIntoDb(pDb, pVgroup); + } +} + +void mgmtCleanUpVgroups() { sdbCloseTable(vgSdb); } + +int mgmtGetVgroupMeta(SMeterMeta *pMeta, SShowObj *pShow, SConnObj *pConn) { + int cols = 0; + + if (pConn->pDb == NULL) return TSDB_CODE_DB_NOT_SELECTED; + + SSchema *pSchema = tsGetSchema(pMeta); + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "vgId"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "meters"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 2; + pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT; + strcpy(pSchema[cols].name, "vnode"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int i = 1; i < cols; ++i) pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + + pShow->numOfRows = pConn->pDb->numOfVgroups; + pShow->pNode = pConn->pDb->pHead; + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + return 0; +} + +int mgmtRetrieveVgroups(SShowObj *pShow, char *data, int rows, SConnObj *pConn) { + int numOfRows = 0; + SVgObj *pVgroup = NULL; + char * pWrite; + int cols = 0; + char ipstr[20]; + + while (numOfRows < rows) { + pVgroup = (SVgObj *)pShow->pNode; + if (pVgroup == NULL) break; + pShow->pNode = (void *)pVgroup->next; + + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pVgroup->vgId; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pVgroup->numOfMeters; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int16_t *)pWrite = pVgroup->vnodeGid[0].vnode; + cols++; + + numOfRows++; + } + + pShow->numOfReads += numOfRows; + return numOfRows; +} + +void *mgmtVgroupActionInsert(void *row, char *str, int size, int *ssize) { + SVgObj *pVgroup = (SVgObj *)row; + SDbObj *pDb = mgmtGetDb(pVgroup->dbName); + + if (pDb == NULL) return NULL; + + int tsize = sizeof(STabObj *) * pDb->cfg.maxSessions; + pVgroup->meterList = (STabObj **)malloc(tsize); + memset(pVgroup->meterList, 0, tsize); + pVgroup->numOfMeters = 0; + pVgroup->idPool = taosInitIdPool(pDb->cfg.maxSessions); + mgmtAddVgroupIntoDb(pDb, pVgroup); + mgmtSetDnodeVgid(pVgroup->vnodeGid[0].vnode, pVgroup->vgId); + + return NULL; +} + +void *mgmtVgroupActionDelete(void *row, char *str, int size, int *ssize) { + SVgObj *pVgroup = (SVgObj *)row; + SDbObj *pDb = mgmtGetDb(pVgroup->dbName); + + if (pDb != NULL) mgmtRemoveVgroupFromDb(pDb, pVgroup); + mgmtUnSetDnodeVgid(pVgroup->vnodeGid[0].vnode); + tfree(pVgroup->meterList); + + return NULL; +} + +void *mgmtVgroupActionUpdate(void *row, char *str, int size, int *ssize) { + mgmtVgroupActionReset(row, str, size, ssize); + SVgObj *pVgroup = (SVgObj *)row; + + mTrace("vgroup:%d update, numOfVnode:%d", pVgroup->vgId, pVgroup->numOfVnodes); + + return NULL; +} +void *mgmtVgroupActionEncode(void *row, char *str, int size, int *ssize) { + SVgObj *pVgroup = (SVgObj *)row; + int tsize = pVgroup->updateEnd - (char *)pVgroup; + if (size < tsize) { + *ssize = -1; + } else { + memcpy(str, pVgroup, tsize); + *ssize = 
tsize; + } + + return NULL; +} +void *mgmtVgroupActionDecode(void *row, char *str, int size, int *ssize) { + SVgObj *pVgroup = (SVgObj *)malloc(sizeof(SVgObj)); + if (pVgroup == NULL) return NULL; + memset(pVgroup, 0, sizeof(SVgObj)); + + int tsize = pVgroup->updateEnd - (char *)pVgroup; + memcpy(pVgroup, str, tsize); + + return (void *)pVgroup; +} +void *mgmtVgroupActionBeforeBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtVgroupActionBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtVgroupActionAfterBatchUpdate(void *row, char *str, int size, int *ssize) { return NULL; } +void *mgmtVgroupActionReset(void *row, char *str, int size, int *ssize) { + SVgObj *pVgroup = (SVgObj *)row; + int tsize = pVgroup->updateEnd - (char *)pVgroup; + + memcpy(pVgroup, str, tsize); + + return NULL; +} +void *mgmtVgroupActionDestroy(void *row, char *str, int size, int *ssize) { + SVgObj *pVgroup = (SVgObj *)row; + if (pVgroup->idPool) { + taosIdPoolCleanUp(pVgroup->idPool); + pVgroup->idPool = NULL; + } + if (pVgroup->meterList) tfree(pVgroup->meterList); + tfree(row); + return NULL; +} \ No newline at end of file diff --git a/src/system/src/vnodeCache.c b/src/system/src/vnodeCache.c new file mode 100644 index 000000000000..c67f548beea5 --- /dev/null +++ b/src/system/src/vnodeCache.c @@ -0,0 +1,947 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include "taosmsg.h" +#include "vnode.h" +#include "vnodeCache.h" +#include "vnodeUtil.h" + +void vnodeSearchPointInCache(SMeterObj *pObj, SQuery *pQuery); +void vnodeProcessCommitTimer(void *param, void *tmrId); + +void *vnodeOpenCachePool(int vnode) { + SCachePool *pCachePool; + SVnodeCfg * pCfg = &vnodeList[vnode].cfg; + int blockId = 0; + char * pMem = NULL; + + pCachePool = (SCachePool *)malloc(sizeof(SCachePool)); + if (pCachePool == NULL) { + dError("no memory to allocate cache pool!"); + return NULL; + } + + memset(pCachePool, 0, sizeof(SCachePool)); + pCachePool->count = 1; + pCachePool->vnode = vnode; + + pthread_mutex_init(&(pCachePool->vmutex), NULL); + + size_t size = sizeof(char *) * pCfg->cacheNumOfBlocks.totalBlocks; + pCachePool->pMem = malloc(size); + if (pCachePool->pMem == NULL) { + dError("no memory to allocate cache blocks!"); + pthread_mutex_destroy(&(pCachePool->vmutex)); + tfree(pCachePool); + return NULL; + } + + memset(pCachePool->pMem, 0, size); + pCachePool->threshold = pCfg->cacheNumOfBlocks.totalBlocks * 0.6; + + int maxAllocBlock = (1024 * 1024 * 1024) / pCfg->cacheBlockSize; + if (maxAllocBlock < 1) { + dError("Cache block size is too large"); + pthread_mutex_destroy(&(pCachePool->vmutex)); + tfree(pCachePool->pMem); + tfree(pCachePool); + return NULL; + } + while (blockId < pCfg->cacheNumOfBlocks.totalBlocks) { + // TODO : Allocate real blocks + int allocBlocks = MIN(pCfg->cacheNumOfBlocks.totalBlocks - blockId, maxAllocBlock); + pMem = calloc(allocBlocks, pCfg->cacheBlockSize); + if (pMem == NULL) { + dError("failed to allocate cache memory"); + goto _err_exit; + } + + for (int i = 0; i < allocBlocks; i++) { + pCachePool->pMem[blockId] = pMem + i * pCfg->cacheBlockSize; + blockId++; + } + } + + dTrace("vid:%d, cache pool is allocated:0x%x", vnode, pCachePool); + + return pCachePool; + +_err_exit: + pthread_mutex_destroy(&(pCachePool->vmutex)); + // TODO : Free the cache blocks and return + blockId = 0; + while (blockId < pCfg->cacheNumOfBlocks.totalBlocks) { + tfree(pCachePool->pMem[blockId]); + blockId = blockId + (MIN(maxAllocBlock, pCfg->cacheNumOfBlocks.totalBlocks - blockId)); + } + tfree(pCachePool->pMem); + tfree(pCachePool); + return NULL; +} + +void vnodeCloseCachePool(int vnode) { + SVnodeObj * pVnode = vnodeList + vnode; + SCachePool *pCachePool = (SCachePool *)pVnode->pCachePool; + int blockId = 0; + + taosTmrStopA(&pVnode->commitTimer); + if (pVnode->commitInProcess) pthread_cancel(pVnode->commitThread); + + dTrace("vid:%d, cache pool closed, count:%d", vnode, pCachePool->count); + + int maxAllocBlock = (1024 * 1024 * 1024) / pVnode->cfg.cacheBlockSize; + while (blockId < pVnode->cfg.cacheNumOfBlocks.totalBlocks) { + tfree(pCachePool->pMem[blockId]); + blockId = blockId + (MIN(maxAllocBlock, pVnode->cfg.cacheNumOfBlocks.totalBlocks - blockId)); + } + tfree(pCachePool->pMem); + pthread_mutex_destroy(&(pCachePool->vmutex)); + tfree(pCachePool); + pVnode->pCachePool = NULL; +} + +void *vnodeAllocateCacheInfo(SMeterObj *pObj) { + SCacheInfo *pInfo; + size_t size; + SVnodeCfg * pCfg = &vnodeList[pObj->vnode].cfg; + + size = sizeof(SCacheInfo); + pInfo = (SCacheInfo *)malloc(size); + if (pInfo == NULL) { + dError("id:%s, no memory for cacheInfo", pObj->meterId); + return NULL; + } + memset(pInfo, 0, size); + pInfo->maxBlocks = vnodeList[pObj->vnode].cfg.blocksPerMeter; + size = sizeof(SCacheBlock *) * pInfo->maxBlocks; + pInfo->cacheBlocks = (SCacheBlock **)malloc(size); + if (pInfo->cacheBlocks == NULL) { + 
dError("id:%s, no memory for cacheBlocks", pObj->meterId); + return NULL; + } + memset(pInfo->cacheBlocks, 0, size); + pInfo->currentSlot = -1; + + pObj->pointsPerBlock = + (pCfg->cacheBlockSize - sizeof(SCacheBlock) - pObj->numOfColumns * sizeof(char *)) / pObj->bytesPerPoint; + if (pObj->pointsPerBlock > pObj->pointsPerFileBlock) pObj->pointsPerBlock = pObj->pointsPerFileBlock; + pObj->pCache = (void *)pInfo; + + pObj->freePoints = pObj->pointsPerBlock * pInfo->maxBlocks; + + return (void *)pInfo; +} + +int vnodeFreeCacheBlock(SCacheBlock *pCacheBlock) { + SMeterObj * pObj; + SCacheInfo *pInfo; + + if (pCacheBlock == NULL) return -1; + + pObj = pCacheBlock->pMeterObj; + pInfo = (SCacheInfo *)pObj->pCache; + + if (pObj) { + pInfo->numOfBlocks--; + + if (pInfo->numOfBlocks < 0) { + dError("vid:%d sid:%d id:%s, numOfBlocks:%d shall never be negative", pObj->vnode, pObj->sid, pObj->meterId, + pInfo->numOfBlocks); + } + + if (pCacheBlock->blockId == 0) { + dError("vid:%d sid:%d id:%s, double free", pObj->vnode, pObj->sid, pObj->meterId); + } + + SCachePool *pPool = (SCachePool *)vnodeList[pObj->vnode].pCachePool; + if (pCacheBlock->notFree) { + pPool->notFreeSlots--; + dTrace("vid:%d sid:%d id:%s, cache block is not free, slot:%d, index:%d notFreeSlots:%d", + pObj->vnode, pObj->sid, pObj->meterId, pCacheBlock->slot, pCacheBlock->index, pPool->notFreeSlots); + } + + dTrace("vid:%d sid:%d id:%s, free a cache block, numOfBlocks:%d, slot:%d, index:%d notFreeSlots:%d", + pObj->vnode, pObj->sid, pObj->meterId, pInfo->numOfBlocks, pCacheBlock->slot, pCacheBlock->index, + pPool->notFreeSlots); + + memset(pCacheBlock, 0, sizeof(SCacheBlock)); + + } else { + dError("BUG, pObj is null"); + } + + return 0; +} + +void vnodeFreeCacheInfo(SMeterObj *pObj) { + SCacheInfo * pInfo; + SCacheBlock *pCacheBlock; + SCachePool * pPool; + int slot, numOfBlocks; + + if (pObj == NULL || pObj->pCache == NULL) return; + + pPool = (SCachePool *)vnodeList[pObj->vnode].pCachePool; + pInfo = (SCacheInfo *)pObj->pCache; + if (pPool == NULL || pInfo == NULL) return; + + pthread_mutex_lock(&pPool->vmutex); + numOfBlocks = pInfo->numOfBlocks; + slot = pInfo->currentSlot; + + for (int i = 0; i < numOfBlocks; ++i) { + pCacheBlock = pInfo->cacheBlocks[slot]; + vnodeFreeCacheBlock(pCacheBlock); + slot = (slot - 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + } + + pObj->pCache = NULL; + tfree(pInfo->cacheBlocks); + tfree(pInfo); + pthread_mutex_unlock(&pPool->vmutex); +} + +uint64_t vnodeGetPoolCount(SVnodeObj *pVnode) { + SCachePool *pPool; + + pPool = (SCachePool *)pVnode->pCachePool; + + return pPool->count; +} + +void vnodeUpdateCommitInfo(SMeterObj *pObj, int slot, int pos, uint64_t count) { + SCacheInfo * pInfo; + SCacheBlock *pBlock; + SCachePool * pPool; + + pInfo = (SCacheInfo *)pObj->pCache; + pPool = (SCachePool *)vnodeList[pObj->vnode].pCachePool; + + int tslot = + (pInfo->commitPoint == pObj->pointsPerBlock) ? 
(pInfo->commitSlot + 1) % pInfo->maxBlocks : pInfo->commitSlot; + int slots = 0; + + while (tslot != slot || ((tslot == slot) && (pos == pObj->pointsPerBlock))) { + slots++; + pthread_mutex_lock(&pPool->vmutex); + pBlock = pInfo->cacheBlocks[tslot]; + assert(pBlock->notFree); + pBlock->notFree = 0; + pInfo->unCommittedBlocks--; + pPool->notFreeSlots--; + pthread_mutex_unlock(&pPool->vmutex); + + dTrace("vid:%d sid:%d id:%s, cache block is committed, slot:%d, index:%d notFreeSlots:%d, unCommittedBlocks:%d", + pObj->vnode, pObj->sid, pObj->meterId, pBlock->slot, pBlock->index, pPool->notFreeSlots, + pInfo->unCommittedBlocks); + if (tslot == slot) break; + tslot = (tslot + 1) % pInfo->maxBlocks; + } + + __sync_fetch_and_add(&pObj->freePoints, pObj->pointsPerBlock * slots); + pInfo->commitSlot = slot; + pInfo->commitPoint = pos; + pObj->commitCount = count; +} + +TSKEY vnodeGetFirstKey(int vnode) { + SMeterObj * pObj; + SCacheInfo * pInfo; + SCacheBlock *pCacheBlock; + + SVnodeCfg *pCfg = &vnodeList[vnode].cfg; + TSKEY key = taosGetTimestamp(pCfg->precision); + + for (int sid = 0; sid < pCfg->maxSessions; ++sid) { + pObj = vnodeList[vnode].meterList[sid]; + if (pObj == NULL || pObj->pCache == NULL) continue; + + pInfo = (SCacheInfo *)pObj->pCache; + pCacheBlock = pInfo->cacheBlocks[0]; + + if (pCacheBlock == NULL || pCacheBlock->numOfPoints <= 0) continue; + + if (*((TSKEY *)(pCacheBlock->offset[0])) < key) key = *((TSKEY *)(pCacheBlock->offset[0])); + } + + return key; +} + +pthread_t vnodeCreateCommitThread(SVnodeObj *pVnode) { + // this function has to mutex locked before it is called + + pthread_attr_t thattr; + SCachePool * pPool = (SCachePool *)pVnode->pCachePool; + + if (pPool->commitInProcess) { + dTrace("vid:%d, commit is already in process", pVnode->vnode); + return pVnode->commitThread; + } + + taosTmrStopA(&pVnode->commitTimer); + + if (pVnode->status == TSDB_STATUS_UNSYNCED) { + taosTmrReset(vnodeProcessCommitTimer, pVnode->cfg.commitTime * 1000, pVnode, vnodeTmrCtrl, &pVnode->commitTimer); + dTrace("vid:%d, it is in unsyc state, commit later", pVnode->vnode); + return pVnode->commitThread; + } + + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED); + if (pthread_create(&(pVnode->commitThread), &thattr, vnodeCommitToFile, pVnode) != 0) { + dError("vid:%d, failed to create thread to commit file, reason:%s", pVnode->vnode, strerror(errno)); + } else { + pPool->commitInProcess = 1; + dTrace("vid:%d, commit thread: 0x%lx is created", pVnode->vnode, pVnode->commitThread); + } + + pthread_attr_destroy(&thattr); + + return pVnode->commitThread; +} + +void vnodeProcessCommitTimer(void *param, void *tmrId) { + SVnodeObj * pVnode = (SVnodeObj *)param; + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; + + pthread_mutex_lock(&pPool->vmutex); + + vnodeCreateCommitThread(pVnode); + + pthread_mutex_unlock(&pPool->vmutex); +} + +void vnodeCommitOver(SVnodeObj *pVnode) { + SCachePool *pPool = (SCachePool *)(pVnode->pCachePool); + + taosTmrReset(vnodeProcessCommitTimer, pVnode->cfg.commitTime * 1000, pVnode, vnodeTmrCtrl, &pVnode->commitTimer); + + pthread_mutex_lock(&pPool->vmutex); + + pPool->commitInProcess = 0; + dTrace("vid:%d, commit is over, notFreeSlots:%d", pPool->vnode, pPool->notFreeSlots); + + pthread_mutex_unlock(&pPool->vmutex); +} + +void vnodeCancelCommit(SVnodeObj *pVnode) { + SCachePool *pPool = (SCachePool *)(pVnode->pCachePool); + if (pPool == NULL) return; + + pthread_mutex_lock(&pPool->vmutex); + + if (pPool->commitInProcess) { + 
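+    // a commit thread is still in progress: clear the flag and cancel the thread
+    // before the commit timer is rescheduled below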
pPool->commitInProcess = 0; + pthread_cancel(pVnode->commitThread); + } + + pthread_mutex_unlock(&pPool->vmutex); + + taosTmrReset(vnodeProcessCommitTimer, pVnode->cfg.commitTime * 1000, pVnode, vnodeTmrCtrl, &pVnode->commitTimer); +} + +int vnodeAllocateCacheBlock(SMeterObj *pObj) { + int index; + SCachePool * pPool; + SCacheBlock *pCacheBlock; + SCacheInfo * pInfo; + SVnodeObj * pVnode; + int skipped = 0, commit = 0; + + pVnode = vnodeList + pObj->vnode; + pPool = (SCachePool *)pVnode->pCachePool; + pInfo = (SCacheInfo *)pObj->pCache; + SVnodeCfg *pCfg = &(vnodeList[pObj->vnode].cfg); + + if (pPool == NULL) return -1; + pthread_mutex_lock(&pPool->vmutex); + + if (pInfo == NULL || pInfo->cacheBlocks == NULL) { + pthread_mutex_unlock(&pPool->vmutex); + dError("vid:%d sid:%d id:%s, meter is not there", pObj->vnode, pObj->sid, pObj->meterId); + return -1; + } + + if (pPool->count <= 1) { + if (pVnode->commitTimer == NULL) + pVnode->commitTimer = taosTmrStart(vnodeProcessCommitTimer, pCfg->commitTime * 1000, pVnode, vnodeTmrCtrl); + } + + if (pInfo->unCommittedBlocks >= pInfo->maxBlocks-1) { + vnodeCreateCommitThread(pVnode); + pthread_mutex_unlock(&pPool->vmutex); + dError("vid:%d sid:%d id:%s, all blocks are not committed yet....", pObj->vnode, pObj->sid, pObj->meterId); + return -1; + } + + while (1) { + pCacheBlock = (SCacheBlock *)(pPool->pMem[((int64_t)pPool->freeSlot)]); + if (pCacheBlock->blockId == 0) break; + + if (pCacheBlock->notFree) { + pPool->freeSlot++; + pPool->freeSlot = pPool->freeSlot % pCfg->cacheNumOfBlocks.totalBlocks; + skipped++; + if (skipped > pPool->threshold) { + vnodeCreateCommitThread(pVnode); + pthread_mutex_unlock(&pPool->vmutex); + dError("vid:%d sid:%d id:%s, committing process is too slow, notFreeSlots:%d....", + pObj->vnode, pObj->sid, pObj->meterId, pPool->notFreeSlots); + return -1; + } + } else { + SMeterObj *pRelObj = pCacheBlock->pMeterObj; + SCacheInfo *pRelInfo = (SCacheInfo *)pRelObj->pCache; + int firstSlot = (pRelInfo->currentSlot - pRelInfo->numOfBlocks + 1 + pRelInfo->maxBlocks) % pRelInfo->maxBlocks; + pCacheBlock = pRelInfo->cacheBlocks[firstSlot]; + if (pCacheBlock) { + pPool->freeSlot = pCacheBlock->index; + vnodeFreeCacheBlock(pCacheBlock); + break; + } else { + pPool->freeSlot = (pPool->freeSlot + 1) % pCfg->cacheNumOfBlocks.totalBlocks; + skipped++; + } + } + } + + index = pPool->freeSlot; + pPool->freeSlot++; + pPool->freeSlot = pPool->freeSlot % pCfg->cacheNumOfBlocks.totalBlocks; + pPool->notFreeSlots++; + + pCacheBlock->pMeterObj = pObj; + pCacheBlock->notFree = 1; + pCacheBlock->index = index; + + pCacheBlock->offset[0] = ((char *)(pCacheBlock)) + sizeof(SCacheBlock) + pObj->numOfColumns * sizeof(char *); + for (int col = 1; col < pObj->numOfColumns; ++col) + pCacheBlock->offset[col] = pCacheBlock->offset[col - 1] + pObj->schema[col - 1].bytes * pObj->pointsPerBlock; + + pInfo->numOfBlocks++; + pInfo->blocks++; + pInfo->unCommittedBlocks++; + pInfo->currentSlot = (pInfo->currentSlot + 1) % pInfo->maxBlocks; + pCacheBlock->blockId = pInfo->blocks; + pCacheBlock->slot = pInfo->currentSlot; + if (pInfo->numOfBlocks > pInfo->maxBlocks) { + pCacheBlock = pInfo->cacheBlocks[pInfo->currentSlot]; + vnodeFreeCacheBlock(pCacheBlock); + } + + pInfo->cacheBlocks[pInfo->currentSlot] = (SCacheBlock *)(pPool->pMem[(int64_t)index]); + dTrace("vid:%d sid:%d id:%s, allocate a cache block, numOfBlocks:%d, slot:%d, index:%d notFreeSlots:%d blocks:%d", + pObj->vnode, pObj->sid, pObj->meterId, pInfo->numOfBlocks, pInfo->currentSlot, index, 
pPool->notFreeSlots, + pInfo->blocks); + + if (((pPool->notFreeSlots > pPool->threshold) || (pInfo->unCommittedBlocks >= pInfo->maxBlocks / 2))) { + dTrace("vid:%d sid:%d id:%s, too many unCommitted slots, unCommitted:%d notFreeSlots:%d", + pObj->vnode, pObj->sid, pObj->meterId, pInfo->unCommittedBlocks, pPool->notFreeSlots); + vnodeCreateCommitThread(pVnode); + commit = 1; + } + + pthread_mutex_unlock(&pPool->vmutex); + + return commit; +} + +int vnodeInsertPointToCache(SMeterObj *pObj, char *pData) { + SCacheBlock *pCacheBlock; + SCacheInfo * pInfo; + SCachePool * pPool; + + pInfo = (SCacheInfo *)pObj->pCache; + pPool = (SCachePool *)vnodeList[pObj->vnode].pCachePool; + + if (pInfo->numOfBlocks == 0) { + if (vnodeAllocateCacheBlock(pObj) < 0) { + return -1; + } + } + + if (pInfo->currentSlot < 0) return -1; + pCacheBlock = pInfo->cacheBlocks[pInfo->currentSlot]; + if (pCacheBlock->numOfPoints >= pObj->pointsPerBlock) { + if (vnodeAllocateCacheBlock(pObj) < 0) return -1; + pCacheBlock = pInfo->cacheBlocks[pInfo->currentSlot]; + } + + for (int col = 0; col < pObj->numOfColumns; ++col) { + memcpy(pCacheBlock->offset[col] + pCacheBlock->numOfPoints * pObj->schema[col].bytes, pData, + pObj->schema[col].bytes); + pData += pObj->schema[col].bytes; + } + + __sync_fetch_and_sub(&pObj->freePoints, 1); + pCacheBlock->numOfPoints++; + pPool->count++; + + return 0; +} + +void vnodeUpdateQuerySlotPos(SCacheInfo *pInfo, SQuery *pQuery) { + SCacheBlock *pCacheBlock; + + int step = QUERY_IS_ASC_QUERY(pQuery) ? -1 : 1; + + if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->slot == pQuery->currentSlot)) || + (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->slot == pQuery->firstSlot))) { + pQuery->over = 1; + + } else { + pQuery->slot = (pQuery->slot - step + pInfo->maxBlocks) % pInfo->maxBlocks; + pCacheBlock = pInfo->cacheBlocks[pQuery->slot]; + pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : pCacheBlock->numOfPoints - 1; + } +} + +static FORCE_INLINE TSKEY vnodeGetTSInCacheBlock(SCacheBlock *pCacheBlock, int32_t pos) { + return *(TSKEY *)(pCacheBlock->offset[PRIMARYKEY_TIMESTAMP_COL_INDEX] + pos * TSDB_KEYSIZE); +} + +int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { + SCacheBlock *pCacheBlock; + int col, step; + char * pRead, *pData; + SCacheInfo * pInfo; + int lastPos = -1; + int startPos, numOfReads, numOfPoints; + + pQuery->pointsRead = 0; + if (pQuery->over) return 0; + + vnodeFreeFields(pQuery); + + pInfo = (SCacheInfo *)pObj->pCache; + if ((pInfo == NULL) || (pInfo->numOfBlocks == 0)) { + pQuery->over = 1; + return 0; + } + + if (pQuery->slot < 0 || pQuery->pos < 0) // it means a new query, we need to find the point first + vnodeSearchPointInCache(pObj, pQuery); + + if (pQuery->slot < 0 || pQuery->pos < 0) { + pQuery->over = 1; + return 0; + } + + step = QUERY_IS_ASC_QUERY(pQuery) ? -1 : 1; + pCacheBlock = pInfo->cacheBlocks[pQuery->slot]; + numOfPoints = pCacheBlock->numOfPoints; + + int maxReads = QUERY_IS_ASC_QUERY(pQuery) ? numOfPoints - pQuery->pos : pQuery->pos + 1; + if (maxReads <= 0) { + vnodeUpdateQuerySlotPos(pInfo, pQuery); + return 0; + } + + if (QUERY_IS_ASC_QUERY(pQuery)) { + TSKEY endkey = vnodeGetTSInCacheBlock(pCacheBlock, numOfPoints - 1); + if (endkey < pQuery->ekey) { + numOfReads = maxReads; + } else { + lastPos = (*vnodeSearchKeyFunc[pObj->searchAlgorithm])( + pCacheBlock->offset[PRIMARYKEY_TIMESTAMP_COL_INDEX] + TSDB_KEYSIZE * pQuery->pos, maxReads, pQuery->ekey, 0); + numOfReads = (lastPos >= 0) ? 
lastPos + 1 : 0; + } + } else { + TSKEY startkey = vnodeGetTSInCacheBlock(pCacheBlock, 0); + if (startkey > pQuery->ekey) { + numOfReads = maxReads; + } else { + lastPos = (*vnodeSearchKeyFunc[pObj->searchAlgorithm])(pCacheBlock->offset[PRIMARYKEY_TIMESTAMP_COL_INDEX], + maxReads, pQuery->ekey, 1); + numOfReads = (lastPos >= 0) ? pQuery->pos - lastPos + 1 : 0; + } + } + + if (numOfReads > pQuery->pointsToRead - pQuery->pointsRead) { + numOfReads = pQuery->pointsToRead - pQuery->pointsRead; + } else { + if (lastPos >= 0 || numOfReads == 0) { + pQuery->keyIsMet = 1; + pQuery->over = 1; + } + } + + startPos = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos : pQuery->pos - numOfReads + 1; + + int32_t numOfQualifiedPoints = 0; + int32_t numOfActualRead = numOfReads; + + if (pQuery->numOfFilterCols == 0) { + for (col = 0; col < pQuery->numOfOutputCols; ++col) { + int16_t colIdx = pQuery->pSelectExpr[col].pBase.colInfo.colIdx; + + int16_t bytes = GET_COLUMN_BYTES(pQuery, col); + int16_t type = GET_COLUMN_TYPE(pQuery, col); + + pData = pQuery->sdata[col]->data + pQuery->pointsOffset * bytes; + /* this column is absent from current block, fill this block with null + * value */ + if (colIdx < 0 || colIdx >= pObj->numOfColumns || + pObj->schema[colIdx].colId != pQuery->pSelectExpr[col].pBase.colInfo.colId) { // set null + setNullN(pData, type, bytes, pCacheBlock->numOfPoints); + } else { + pRead = pCacheBlock->offset[colIdx] + startPos * bytes; + memcpy(pData, pRead, numOfReads * bytes); + } + } + numOfQualifiedPoints = numOfReads; + } else { // check each data one by one + // set the input column data + for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { + int16_t colIdx = pQuery->pFilterInfo[k].pFilter.colIdx; + + if (colIdx < 0) { + /* current data has not specified column */ + pQuery->pFilterInfo[k].pData = NULL; + } else { + pQuery->pFilterInfo[k].pData = pCacheBlock->offset[colIdx]; + } + } + + int32_t *ids = calloc(1, numOfReads * sizeof(int32_t)); + numOfActualRead = 0; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + for (int32_t j = startPos; j < pCacheBlock->numOfPoints; ++j) { + TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); + assert(key >= pQuery->skey); + + if (key > pQuery->ekey) { + break; + } + + if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { + continue; + } + + ids[numOfQualifiedPoints] = j; + if (++numOfQualifiedPoints == numOfReads) { + // qualified data are enough + break; + } + } + } else { + startPos = pQuery->pos; + for (int32_t j = startPos; j >= 0; --j) { + TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); + assert(key <= pQuery->skey); + + if (key < pQuery->ekey) { + break; + } + + if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { + continue; + } + + ids[numOfReads - numOfQualifiedPoints - 1] = j; + if (++numOfQualifiedPoints == numOfReads) { + // qualified data are enough + break; + } + } + } + + int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : numOfReads - numOfQualifiedPoints; + for (int32_t j = 0; j < numOfQualifiedPoints; ++j) { + for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { + int16_t colIndex = pQuery->pSelectExpr[col].pBase.colInfo.colIdx; + + int32_t bytes = pObj->schema[colIndex].bytes; + pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes; + pRead = pCacheBlock->offset[colIndex] + ids[j + start] * bytes; + + memcpy(pData, pRead, bytes); + } + } + + tfree(ids); + assert(numOfQualifiedPoints <= numOfReads); + } + + pQuery->pointsRead += numOfQualifiedPoints; + pQuery->pos -= numOfActualRead * step; + + // update the skey/lastkey + int32_t lastAccessPos = pQuery->pos + step; + pQuery->lastKey = vnodeGetTSInCacheBlock(pCacheBlock, lastAccessPos); + pQuery->skey = pQuery->lastKey - step; + + int update = 0; // go to next slot after this round + if ((pQuery->pos < 0 || pQuery->pos >= pObj->pointsPerBlock || numOfReads == 0) && (pQuery->over == 0)) update = 1; + + // if block is changed, it shall be thrown away, it won't happen for committing + if (pObj != pCacheBlock->pMeterObj || pCacheBlock->blockId > pQuery->blockId) { + update = 1; + pQuery->pointsRead = 0; + dWarn("vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d", + pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, pCacheBlock->blockId, pQuery->blockId); + } + + if (update) vnodeUpdateQuerySlotPos(pInfo, pQuery); + + for (col = 0; col < pQuery->numOfOutputCols; ++col) { + int16_t bytes = GET_COLUMN_BYTES(pQuery, col); + pQuery->sdata[col]->len = bytes * (pQuery->pointsRead + pQuery->pointsOffset); + } + return pQuery->pointsRead; +} + +void vnodeSearchPointInCache(SMeterObj *pObj, SQuery *pQuery) { + int numOfBlocks; + int firstSlot, lastSlot, midSlot; + TSKEY keyFirst, keyLast; + SCacheBlock *pBlock; + SCacheInfo * pInfo = (SCacheInfo *)pObj->pCache; + SCachePool * pPool = (SCachePool *)vnodeList[pObj->vnode].pCachePool; + + pQuery->slot = -1; + pQuery->pos = -1; + + // save these variables first in case it may be changed by write operation + pthread_mutex_lock(&pPool->vmutex); + numOfBlocks = pInfo->numOfBlocks; + lastSlot = pInfo->currentSlot; + pthread_mutex_unlock(&pPool->vmutex); + if (numOfBlocks <= 0) return; + + firstSlot = (lastSlot - numOfBlocks + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + + // make sure it is there, otherwise, return right away + pBlock = pInfo->cacheBlocks[firstSlot]; + keyFirst = vnodeGetTSInCacheBlock(pBlock, 0); + + pBlock = pInfo->cacheBlocks[lastSlot]; + keyLast = vnodeGetTSInCacheBlock(pBlock, pBlock->numOfPoints - 1); + + pQuery->blockId = pBlock->blockId; + pQuery->currentSlot = lastSlot; + pQuery->numOfBlocks = numOfBlocks; + pQuery->firstSlot = firstSlot; + + if (!QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->skey < keyFirst) return; + if (pQuery->ekey > keyLast) return; + } else { + if (pQuery->skey > keyLast) return; + if (pQuery->ekey < keyFirst) return; + } + + while (1) { + numOfBlocks = (lastSlot - firstSlot + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + if (numOfBlocks == 0) numOfBlocks = pInfo->maxBlocks; + midSlot = (firstSlot + (numOfBlocks >> 1)) % pInfo->maxBlocks; + pBlock = pInfo->cacheBlocks[midSlot]; + + keyFirst = vnodeGetTSInCacheBlock(pBlock, 0); + keyLast = vnodeGetTSInCacheBlock(pBlock, pBlock->numOfPoints - 1); + + if (numOfBlocks == 1) break; + + if (pQuery->skey > keyLast) { + if (numOfBlocks == 2) break; + if (!QUERY_IS_ASC_QUERY(pQuery)) { + int nextSlot = (midSlot + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + SCacheBlock *pNextBlock = 
pInfo->cacheBlocks[nextSlot]; + TSKEY nextKeyFirst = vnodeGetTSInCacheBlock(pNextBlock, 0); + if (pQuery->skey < nextKeyFirst) break; + } + firstSlot = (midSlot + 1) % pInfo->maxBlocks; + } else if (pQuery->skey < keyFirst) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + int prevSlot = (midSlot - 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + SCacheBlock *pPrevBlock = pInfo->cacheBlocks[prevSlot]; + TSKEY prevKeyLast = vnodeGetTSInCacheBlock(pPrevBlock, pPrevBlock->numOfPoints - 1); + + if (pQuery->skey > prevKeyLast) break; + } + lastSlot = (midSlot - 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + } else { + break; // got the slot + } + } + + pQuery->slot = midSlot; + if (!QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->skey < keyFirst) return; + + if (pQuery->ekey > keyLast) { + pQuery->slot = (midSlot + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + return; + } + } else { + if (pQuery->skey > keyLast) { + pQuery->slot = (midSlot + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + return; + } + + if (pQuery->ekey < keyFirst) return; + } + + // midSlot and pBlock is the search result + + pBlock = pInfo->cacheBlocks[midSlot]; + pQuery->pos = (*vnodeSearchKeyFunc[pObj->searchAlgorithm])(pBlock->offset[0], pBlock->numOfPoints, pQuery->skey, + pQuery->order.order); + pQuery->key = vnodeGetTSInCacheBlock(pBlock, pQuery->pos); + + if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols == 0) { + int maxReads = QUERY_IS_ASC_QUERY(pQuery) ? pBlock->numOfPoints - pQuery->pos : pQuery->pos + 1; + + if (pQuery->limit.offset < maxReads) { // start position in current block + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->pos += pQuery->limit.offset; + } else { + pQuery->pos -= pQuery->limit.offset; + } + + pQuery->key = vnodeGetTSInCacheBlock(pBlock, pQuery->pos); + pQuery->limit.offset = 0; + } else if (pInfo->numOfBlocks == 1) { + pQuery->pos = -1; // no qualified data + } else { + int step = QUERY_IS_ASC_QUERY(pQuery) ? 
1 : -1; + + pQuery->limit.offset -= maxReads; + midSlot = (midSlot + step + pInfo->maxBlocks) % pInfo->maxBlocks; + + bool hasData = true; + while (pQuery->limit.offset > pInfo->cacheBlocks[midSlot]->numOfPoints) { + pQuery->limit.offset -= pInfo->cacheBlocks[midSlot]->numOfPoints; + + if ((QUERY_IS_ASC_QUERY(pQuery) && midSlot == pQuery->currentSlot) || + (!QUERY_IS_ASC_QUERY(pQuery) && midSlot == pQuery->firstSlot)) { // no qualified data in cache + hasData = false; + break; + } + midSlot = (midSlot + step + pInfo->maxBlocks) % pInfo->maxBlocks; + } + + if (hasData) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->pos = pQuery->limit.offset; + } else { + pQuery->pos = pInfo->cacheBlocks[midSlot]->numOfPoints - pQuery->limit.offset - 1; + } + pQuery->limit.offset = 0; + pQuery->slot = midSlot; + + pQuery->key = vnodeGetTSInCacheBlock(pInfo->cacheBlocks[midSlot], pQuery->pos); + } else { + pQuery->pos = -1; // no qualified data + + pBlock = pInfo->cacheBlocks[midSlot]; + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->lastKey = vnodeGetTSInCacheBlock(pBlock, pBlock->numOfPoints - 1); + pQuery->skey = pQuery->lastKey + 1; + } else { + pQuery->lastKey = vnodeGetTSInCacheBlock(pBlock, 0); + pQuery->skey = pQuery->lastKey - 1; + } + } + } + } + + return; +} + +void vnodeSetCommitQuery(SMeterObj *pObj, SQuery *pQuery) { + SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; + SCachePool *pPool = (SCachePool *)vnodeList[pObj->vnode].pCachePool; + SVnodeObj * pVnode = vnodeList + pObj->vnode; + + pQuery->order.order = TSQL_SO_ASC; + pQuery->numOfCols = pObj->numOfColumns; + pQuery->numOfOutputCols = pObj->numOfColumns; + + for (int16_t col = 0; col < pObj->numOfColumns; ++col) { + pQuery->colList[col].colIdxInBuf = col; + + pQuery->colList[col].data.colId = pObj->schema[col].colId; + pQuery->colList[col].data.bytes = pObj->schema[col].bytes; + pQuery->colList[col].data.type = pObj->schema[col].type; + + SColIndexEx *pColIndexEx = &pQuery->pSelectExpr[col].pBase.colInfo; + + pColIndexEx->colId = pObj->schema[col].colId; + pColIndexEx->colIdx = col; + pColIndexEx->colIdxInBuf = col; + pColIndexEx->isTag = false; + } + + pQuery->slot = pInfo->commitSlot; + pQuery->pos = pInfo->commitPoint; + pQuery->over = 0; + + pthread_mutex_lock(&pPool->vmutex); + pQuery->currentSlot = pInfo->currentSlot; + pQuery->numOfBlocks = pInfo->numOfBlocks; + pthread_mutex_unlock(&pPool->vmutex); + + if (pQuery->numOfBlocks <= 0 || pQuery->firstSlot < 0) { + pQuery->over = 1; + return; + } + + pQuery->firstSlot = (pQuery->currentSlot - pQuery->numOfBlocks + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + pQuery->blockId = pInfo->cacheBlocks[pQuery->currentSlot]->blockId; + + SCacheBlock *pCacheBlock; + pCacheBlock = pInfo->cacheBlocks[pInfo->commitSlot]; + if (pInfo->commitSlot == pQuery->currentSlot && pInfo->commitPoint == pCacheBlock->numOfPoints) { + dTrace("vid:%d sid:%d id:%s, no new data to commit", pObj->vnode, pObj->sid, pObj->meterId); + pQuery->over = 1; + return; + } + + if (pQuery->pos == pObj->pointsPerBlock) { + pQuery->slot = (pQuery->slot + 1) % pInfo->maxBlocks; + pQuery->pos = 0; + } + + pCacheBlock = pInfo->cacheBlocks[pQuery->slot]; + TSKEY firstKey = *((TSKEY *)(pCacheBlock->offset[0] + pQuery->pos * pObj->schema[0].bytes)); + + if (firstKey < pQuery->skey) { + pQuery->over = 1; + dTrace("vid:%d sid:%d id:%s, first key is small, keyFirst:%ld commitFirstKey:%ld", + pObj->vnode, pObj->sid, pObj->meterId, firstKey, pQuery->skey); + pthread_mutex_lock(&(pVnode->vmutex)); + if (firstKey < pVnode->firstKey) 
pVnode->firstKey = firstKey; + pthread_mutex_unlock(&(pVnode->vmutex)); + } +} + +int vnodeIsCacheCommitted(SMeterObj *pObj) { + if (pObj->pCache == NULL) return 1; + + SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; + if (pInfo->currentSlot < 0) return 1; + + SCacheBlock *pBlock = pInfo->cacheBlocks[pInfo->currentSlot]; + if (pInfo->commitSlot != pInfo->currentSlot) return 0; + if (pInfo->commitPoint != pBlock->numOfPoints) return 0; + + return 1; +} diff --git a/src/system/src/vnodeCommit.c b/src/system/src/vnodeCommit.c new file mode 100644 index 000000000000..bef3639ea0df --- /dev/null +++ b/src/system/src/vnodeCommit.c @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _GNU_SOURCE /* See feature_test_macros(7) */ +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "tsdb.h" +#include "vnode.h" + +typedef struct { + char action; + int sversion; + int sid; + int contLen; +} SCommitHead; + +int vnodeOpenCommitLog(int vnode, uint64_t firstV) { + SVnodeObj *pVnode = vnodeList + vnode; + char * fileName = pVnode->logFn; + + pVnode->logFd = open(fileName, O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + if (pVnode->logFd < 0) { + dError("vid:%d, failed to open file:%s, reason:%s", vnode, fileName, strerror(errno)); + return -1; + } + + dTrace("vid:%d, logfd:%d, open file:%s success", vnode, pVnode->logFd, fileName); + if (posix_fallocate64(pVnode->logFd, 0, pVnode->mappingSize) != 0) { + dError("vid:%d, logfd:%d, failed to alloc file size:%d", vnode, pVnode->logFd, pVnode->mappingSize); + perror("fallocate failed"); + return -1; + } + + struct stat statbuf; + stat(fileName, &statbuf); + int64_t length = statbuf.st_size; + + if (length != pVnode->mappingSize) { + dError("vid:%d, logfd:%d, alloc file size:%ld not equal to mapping size:%ld", vnode, pVnode->logFd, length, + pVnode->mappingSize); + return -1; + } + + pVnode->pMem = mmap(0, pVnode->mappingSize, PROT_WRITE | PROT_READ, MAP_SHARED, pVnode->logFd, 0); + if (pVnode->pMem == MAP_FAILED) { + dError("vid:%d, logfd:%d, failed to map file, reason:%s", vnode, pVnode->logFd, strerror(errno)); + return -1; + } + + pVnode->pWrite = pVnode->pMem; + memcpy(pVnode->pWrite, &(firstV), sizeof(firstV)); + pVnode->pWrite += sizeof(firstV); + + return pVnode->logFd; +} + +int vnodeRenewCommitLog(int vnode) { + SVnodeObj *pVnode = vnodeList + vnode; + char * fileName = pVnode->logFn; + char * oldName = pVnode->logOFn; + + pthread_mutex_lock(&(pVnode->logMutex)); + + if (VALIDFD(pVnode->logFd)) { + munmap(pVnode->pMem, pVnode->mappingSize); + tclose(pVnode->logFd); + rename(fileName, oldName); + } + + if (pVnode->cfg.commitLog) vnodeOpenCommitLog(vnode, vnodeList[vnode].version); + + pthread_mutex_unlock(&(pVnode->logMutex)); + + return pVnode->logFd; +} + +void vnodeRemoveCommitLog(int vnode) { remove(vnodeList[vnode].logOFn); } + +size_t vnodeRestoreDataFromLog(int vnode, char *fileName, uint64_t *firstV) { + int fd, ret; + char * cont = NULL; + 
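+  // totalLen counts the log bytes consumed so far; actions counts the records replayed into cache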
size_t totalLen = 0; + int actions = 0; + + SVnodeObj *pVnode = vnodeList + vnode; + if (pVnode->meterList == NULL) { + dError("vid:%d, vnode is not initialized!!!", vnode); + return 0; + } + + struct stat fstat; + if (stat(fileName, &fstat) < 0) { + dTrace("vid:%d, no log file:%s", vnode, fileName); + return 0; + } + + dTrace("vid:%d, uncommitted data in file:%s, restore them ...", vnode, fileName); + + fd = open(fileName, O_RDWR); + if (fd < 0) { + dError("vid:%d, failed to open:%s, reason:%s", vnode, fileName); + goto _error; + } + + ret = read(fd, firstV, sizeof(pVnode->version)); + if (ret <= 0) { + dError("vid:%d, failed to read version", vnode); + goto _error; + } + pVnode->version = *firstV; + + int32_t bufLen = TSDB_PAYLOAD_SIZE; + cont = calloc(1, bufLen); + if (cont == NULL) { + dError("vid:%d, out of memory", vnode); + goto _error; + } + + SCommitHead head; + while (1) { + ret = read(fd, &head, sizeof(head)); + if (ret < 0) goto _error; + if (ret == 0) break; + + // head.contLen validation is removed + if (head.sid >= pVnode->cfg.maxSessions || head.sid < 0 || head.action >= TSDB_ACTION_MAX) { + dError("vid, invalid commit head, sid:%d contLen:%d action:%d", head.sid, head.contLen, head.action); + } else { + if (head.contLen > 0) { + if (bufLen < head.contLen) { // pre-allocated buffer is not enough + cont = realloc(cont, head.contLen); + bufLen = head.contLen; + } + + if (read(fd, cont, head.contLen) < 0) goto _error; + SMeterObj *pObj = pVnode->meterList[head.sid]; + if (pObj == NULL) { + dError( + "vid:%d, sid:%d not exists, ignore data in commit log, " + "contLen:%d action:%d", + vnode, head.sid, head.contLen, head.action); + continue; + } + + int32_t numOfPoints = 0; + (*vnodeProcessAction[head.action])(pObj, cont, head.contLen, TSDB_DATA_SOURCE_LOG, NULL, head.sversion, + &numOfPoints); + actions++; + } else { + break; + } + } + + totalLen += sizeof(head) + head.contLen; + } + + tclose(fd); + tfree(cont); + dTrace("vid:%d, %d pieces of uncommitted data are restored", vnode, actions); + + return totalLen; + +_error: + tclose(fd); + tfree(cont); + dError("vid:%d, failed to restore %s, remove this node...", vnode, fileName); + + // rename to error file for future process + char *f = NULL; + taosFileRename(fileName, "error", '/', &f); + free(f); + + return -1; +} + +int vnodeInitCommit(int vnode) { + size_t size = 0; + uint64_t firstV = 0; + SVnodeObj *pVnode = vnodeList + vnode; + + pthread_mutex_init(&(pVnode->logMutex), NULL); + + sprintf(pVnode->logFn, "%s/vnode%d/db/submit%d.log", tsDirectory, vnode, vnode); + sprintf(pVnode->logOFn, "%s/vnode%d/db/submit%d.olog", tsDirectory, vnode, vnode); + pVnode->mappingSize = ((int64_t)pVnode->cfg.cacheBlockSize) * pVnode->cfg.cacheNumOfBlocks.totalBlocks * 1.5; + pVnode->mappingThreshold = pVnode->mappingSize * 0.7; + + // restore from .olog file and commit to file + size = vnodeRestoreDataFromLog(vnode, pVnode->logOFn, &firstV); + if (size < 0) return -1; + if (size > 0) { + if (pVnode->commitInProcess == 0) vnodeCommitToFile(pVnode); + remove(pVnode->logOFn); + } + + // restore from .log file to cache + size = vnodeRestoreDataFromLog(vnode, pVnode->logFn, &firstV); + if (size < 0) return -1; + + if (pVnode->cfg.commitLog == 0) return 0; + + if (size == 0) firstV = pVnode->version; + if (vnodeOpenCommitLog(vnode, firstV) < 0) { + dError("vid:%d, commit log init failed", vnode); + return -1; + } + + pVnode->pWrite += size; + dTrace("vid:%d, commit log is initialized", vnode); + + return 0; +} + +void vnodeCleanUpCommit(int vnode) 
{ + SVnodeObj *pVnode = vnodeList + vnode; + + if (pVnode->logFd) tclose(pVnode->logFd); + + if (pVnode->cfg.commitLog && remove(pVnode->logFn) < 0) { + dError("vid:%d, failed to remove:%s", vnode, pVnode->logFn); + taosLogError("vid:%d, failed to remove:%s", vnode, pVnode->logFn); + } + + pthread_mutex_destroy(&(pVnode->logMutex)); +} + +int vnodeWriteToCommitLog(SMeterObj *pObj, char action, char *cont, int contLen, int sverion) { + SVnodeObj *pVnode = vnodeList + pObj->vnode; + if (pVnode->pWrite == NULL) return 0; + + SCommitHead head; + head.sid = pObj->sid; + head.action = action; + head.sversion = pObj->sversion; + head.contLen = contLen; + + pthread_mutex_lock(&(pVnode->logMutex)); + // 100 bytes redundant mem space + if (pVnode->mappingSize - (pVnode->pWrite - pVnode->pMem) < contLen + sizeof(SCommitHead) + 100) { + pthread_mutex_unlock(&(pVnode->logMutex)); + dTrace("vid:%d, mem mapping space is not enough, wait for commit", pObj->vnode); + vnodeProcessCommitTimer(pVnode, NULL); + return TSDB_CODE_ACTION_IN_PROGRESS; + } + char *pWrite = pVnode->pWrite; + pVnode->pWrite += sizeof(head) + contLen; + memcpy(pWrite, (char *)&head, sizeof(head)); + memcpy(pWrite + sizeof(head), cont, contLen); + pthread_mutex_unlock(&(pVnode->logMutex)); + + if (pVnode->pWrite - pVnode->pMem > pVnode->mappingThreshold) { + dTrace("vid:%d, mem mapping is close to limit, commit", pObj->vnode); + vnodeProcessCommitTimer(pVnode, NULL); + } + + dTrace("vid:%d sid:%d, data is written to commit log", pObj->vnode, pObj->sid); + + return 0; +} diff --git a/src/system/src/vnodeFile.c b/src/system/src/vnodeFile.c new file mode 100644 index 000000000000..13efa77c9d67 --- /dev/null +++ b/src/system/src/vnodeFile.c @@ -0,0 +1,1801 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tscompression.h" +#include "tutil.h" +#include "vnode.h" +#include "vnodeFile.h" +#include "vnodeUtil.h" + +#define FILE_QUERY_NEW_BLOCK -5 // a special negative number + +const int16_t vnodeFileVersion = 0; + +int (*pCompFunc[])(const char *const input, int inputSize, const int elements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) = {NULL, + tsCompressBool, + tsCompressTinyint, + tsCompressSmallint, + tsCompressInt, + tsCompressBigint, + tsCompressFloat, + tsCompressDouble, + tsCompressString, + tsCompressTimestamp, + tsCompressString}; + +int (*pDecompFunc[])(const char *const input, int compressedSize, const int elements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) = {NULL, + tsDecompressBool, + tsDecompressTinyint, + tsDecompressSmallint, + tsDecompressInt, + tsDecompressBigint, + tsDecompressFloat, + tsDecompressDouble, + tsDecompressString, + tsDecompressTimestamp, + tsDecompressString}; + +int vnodeUpdateFileMagic(int vnode, int fileId); +int vnodeRecoverCompHeader(int vnode, int fileId); +int vnodeRecoverHeadFile(int vnode, int fileId); +int vnodeRecoverDataFile(int vnode, int fileId); +int vnodeForwardStartPosition(SQuery *pQuery, SCompBlock *pBlock, int32_t slotIdx, SVnodeObj *pVnode, SMeterObj *pObj); + +int64_t tsendfile(int dfd, int sfd, int64_t bytes) { + int64_t leftbytes = bytes; + off_t offset = 0; + int64_t sentbytes; + + while (leftbytes > 0) { + sentbytes = (leftbytes > 1000000000) ? 1000000000 : leftbytes; + sentbytes = sendfile(dfd, sfd, &offset, sentbytes); + if (sentbytes < 0) { + dError("send file failed,reason:%s", strerror(errno)); + return -1; + } + + leftbytes -= sentbytes; + // dTrace("sentbytes:%ld leftbytes:%ld", sentbytes, leftbytes); + } + + return bytes; +} + +void vnodeGetHeadDataLname(char *headName, char *dataName, char *lastName, int vnode, int fileId) { + if (headName != NULL) sprintf(headName, "%s/vnode%d/db/v%df%d.head", tsDirectory, vnode, vnode, fileId); + if (dataName != NULL) sprintf(dataName, "%s/vnode%d/db/v%df%d.data", tsDirectory, vnode, vnode, fileId); + if (lastName != NULL) sprintf(lastName, "%s/vnode%d/db/v%df%d.last", tsDirectory, vnode, vnode, fileId); +} + +void vnodeGetHeadDataDname(char *dHeadName, char *dDataName, char *dLastName, int vnode, int fileId, char *path) { + if (dHeadName != NULL) sprintf(dHeadName, "%s/data/vnode%d/v%df%d.head0", path, vnode, vnode, fileId); + if (dDataName != NULL) sprintf(dDataName, "%s/data/vnode%d/v%df%d.data", path, vnode, vnode, fileId); + if (dLastName != NULL) sprintf(dLastName, "%s/data/vnode%d/v%df%d.last0", path, vnode, vnode, fileId); +} + +void vnodeGetDnameFromLname(char *lhead, char *ldata, char *llast, char *dhead, char *ddata, char *dlast) { + if (lhead != NULL) { + assert(dhead != NULL); + readlink(lhead, dhead, TSDB_FILENAME_LEN); + } + + if (ldata != NULL) { + assert(ddata != NULL); + readlink(ldata, ddata, TSDB_FILENAME_LEN); + } + + if (llast != NULL) { + assert(dlast != NULL); + readlink(llast, dlast, TSDB_FILENAME_LEN); + } +} + +void vnodeGetHeadTname(char *nHeadName, char *nLastName, int vnode, int fileId) { + sprintf(nHeadName, "%s/vnode%d/db/v%df%d.t", tsDirectory, vnode, vnode, fileId); + sprintf(nLastName, "%s/vnode%d/db/v%df%d.l", tsDirectory, vnode, vnode, fileId); +} + +void vnodeCreateDataDirIfNeeded(int vnode, char *path) { + char directory[TSDB_FILENAME_LEN] 
= "\0"; + + sprintf(directory, "%s/data/vnode%d", path, vnode); + + if (access(directory, F_OK) != 0) mkdir(directory, 0755); +} + +int vnodeCreateHeadDataFile(int vnode, int fileId, char *headName, char *dataName, char *lastName) { + char dHeadName[TSDB_FILENAME_LEN]; + char dDataName[TSDB_FILENAME_LEN]; + char dLastName[TSDB_FILENAME_LEN]; + + vnodeCreateDataDirIfNeeded(vnode, dataDir); + + vnodeGetHeadDataLname(headName, dataName, lastName, vnode, fileId); + vnodeGetHeadDataDname(dHeadName, dDataName, dLastName, vnode, fileId, dataDir); + if (symlink(dHeadName, headName) != 0) return -1; + if (symlink(dDataName, dataName) != 0) return -1; + if (symlink(dLastName, lastName) != 0) return -1; + + dTrace( + "vid:%d, fileId:%d, empty header file:%s dataFile:%s lastFile:%s on " + "disk:%s is created ", + vnode, fileId, headName, dataName, lastName, tsDirectory); + + return 0; +} + +int vnodeCreateEmptyCompFile(int vnode, int fileId) { + char headName[TSDB_FILENAME_LEN]; + char dataName[TSDB_FILENAME_LEN]; + char lastName[TSDB_FILENAME_LEN]; + int tfd; + char *temp; + + if (vnodeCreateHeadDataFile(vnode, fileId, headName, dataName, lastName) < 0) { + dError("failed to create head data file, vnode: %d, fileId: %d", vnode, fileId); + return -1; + } + + tfd = open(headName, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + if (tfd < 0) { + dError("failed to create head file:%s, reason:%s", headName, strerror(errno)); + return -1; + } + + vnodeCreateFileHeaderFd(tfd); + int size = sizeof(SCompHeader) * vnodeList[vnode].cfg.maxSessions + sizeof(TSCKSUM); + temp = malloc(size); + memset(temp, 0, size); + taosCalcChecksumAppend(0, (uint8_t *)temp, size); + + lseek(tfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + write(tfd, temp, size); + free(temp); + close(tfd); + + tfd = open(dataName, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + if (tfd < 0) { + dError("failed to create data file:%s, reason:%s", dataName, strerror(errno)); + return -1; + } + vnodeCreateFileHeaderFd(tfd); + close(tfd); + + tfd = open(lastName, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + if (tfd < 0) { + dError("failed to create last file:%s, reason:%s", lastName, strerror(errno)); + return -1; + } + vnodeCreateFileHeaderFd(tfd); + close(tfd); + + return 0; +} + +int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { + char name[TSDB_FILENAME_LEN]; + char dHeadName[TSDB_FILENAME_LEN] = "\0"; + char dLastName[TSDB_FILENAME_LEN] = "\0"; + int len = 0; + struct stat filestat; + int vnode = pVnode->vnode; + int fileId, numOfFiles, filesAdded = 0; + SVnodeCfg * pCfg = &pVnode->cfg; + + if (pVnode->lastKeyOnFile == 0) { + if (pCfg->daysPerFile == 0) pCfg->daysPerFile = 10; + pVnode->fileId = pVnode->firstKey / tsMsPerDay[pVnode->cfg.precision] / pCfg->daysPerFile; + pVnode->lastKeyOnFile = (long)(pVnode->fileId + 1) * pCfg->daysPerFile * tsMsPerDay[pVnode->cfg.precision] - 1; + pVnode->numOfFiles = 1; + vnodeCreateEmptyCompFile(vnode, pVnode->fileId); + } + + numOfFiles = (pVnode->lastKeyOnFile - pVnode->commitFirstKey) / tsMsPerDay[pVnode->cfg.precision] / pCfg->daysPerFile; + if (pVnode->commitFirstKey > pVnode->lastKeyOnFile) numOfFiles = -1; + + dTrace( + "vid:%d, commitFirstKey:%ld lastKeyOnFile:%ld numOfFiles:%d fileId:%d " + "vnodeNumOfFiles:%d", + vnode, pVnode->commitFirstKey, pVnode->lastKeyOnFile, numOfFiles, pVnode->fileId, pVnode->numOfFiles); + + if (numOfFiles >= pVnode->numOfFiles) { + // create empty header files backward + filesAdded = numOfFiles - pVnode->numOfFiles + 1; + 
for (int i = 0; i < filesAdded; ++i) { + fileId = pVnode->fileId - pVnode->numOfFiles - i; + if (vnodeCreateEmptyCompFile(vnode, fileId) < 0) return -1; + } + } else if (numOfFiles < 0) { + // create empty header files forward + pVnode->fileId++; + if (vnodeCreateEmptyCompFile(vnode, pVnode->fileId) < 0) return -1; + pVnode->lastKeyOnFile += (long)tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile; + filesAdded = 1; + numOfFiles = 0; // hacker way + } + + fileId = pVnode->fileId - numOfFiles; + pVnode->commitLastKey = + pVnode->lastKeyOnFile - (long)numOfFiles * tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile; + pVnode->commitFirstKey = pVnode->commitLastKey - (long)tsMsPerDay[pVnode->cfg.precision] * pCfg->daysPerFile + 1; + pVnode->commitFileId = fileId; + pVnode->numOfFiles = pVnode->numOfFiles + filesAdded; + + dTrace( + "vid:%d, commit fileId:%d, commitLastKey:%ld, vnodeLastKey:%ld, " + "lastKeyOnFile:%ld numOfFiles:%d", + vnode, fileId, pVnode->commitLastKey, pVnode->lastKey, pVnode->lastKeyOnFile, pVnode->numOfFiles); + + int minSize = sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM) + TSDB_FILE_HEADER_LEN; + + vnodeGetHeadDataLname(pVnode->cfn, name, pVnode->lfn, vnode, fileId); + readlink(pVnode->cfn, dHeadName, TSDB_FILENAME_LEN); + readlink(pVnode->lfn, dLastName, TSDB_FILENAME_LEN); + len = strlen(dHeadName); + if (dHeadName[len - 1] == 'd') { + dHeadName[len] = '0'; + dHeadName[len + 1] = '\0'; + } else { + dHeadName[len - 1] = '0' + (dHeadName[len - 1] + 1 - '0') % 2; + } + len = strlen(dLastName); + if (dLastName[len - 1] == 't') { + dLastName[len] = '0'; + dLastName[len + 1] = '\0'; + } else { + dLastName[len - 1] = '0' + (dLastName[len - 1] + 1 - '0') % 2; + } + vnodeGetHeadTname(pVnode->nfn, pVnode->tfn, vnode, fileId); + symlink(dHeadName, pVnode->nfn); + if (!noTempLast) symlink(dLastName, pVnode->tfn); + + // open head file + pVnode->hfd = open(pVnode->cfn, O_RDONLY); + if (pVnode->hfd < 0) { + dError("vid:%d, failed to open head file:%s, reason:%s", vnode, pVnode->cfn, strerror(errno)); + taosLogError("vid:%d, failed to open head file:%s, reason:%s", vnode, pVnode->cfn, strerror(errno)); + goto _error; + } + + // verify head file, check size + fstat(pVnode->hfd, &filestat); + if (filestat.st_size < minSize) { + dError("vid:%d, head file:%s corrupted", vnode, pVnode->cfn); + taosLogError("vid:%d, head file:%s corrupted", vnode, pVnode->cfn); + goto _error; + } + + // open a new header file + pVnode->nfd = open(pVnode->nfn, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + if (pVnode->nfd < 0) { + dError("vid:%d, failed to open new head file:%s, reason:%s", vnode, pVnode->nfn, strerror(errno)); + taosLogError("vid:%d, failed to open new head file:%s, reason:%s", vnode, pVnode->nfn, strerror(errno)); + goto _error; + } + vnodeCreateFileHeaderFd(pVnode->nfd); + + // open existing data file + pVnode->dfd = open(name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + if (pVnode->dfd < 0) { + dError("vid:%d, failed to open data file:%s, reason:%s", vnode, name, strerror(errno)); + taosLogError("vid:%d, failed to open data file:%s, reason:%s", vnode, name, strerror(errno)); + goto _error; + } + + // verify data file, check size + fstat(pVnode->dfd, &filestat); + if (filestat.st_size < TSDB_FILE_HEADER_LEN) { + dError("vid:%d, data file:%s corrupted", vnode, name); + taosLogError("vid:%d, data file:%s corrupted", vnode, name); + goto _error; + } else { + dTrace("vid:%d, data file:%s is opened to write", vnode, name); + } + + // open last 
file + pVnode->lfd = open(pVnode->lfn, O_RDWR); + if (pVnode->lfd < 0) { + dError("vid:%d, failed to open last file:%s, reason:%s", vnode, pVnode->lfn, strerror(errno)); + taosLogError("vid:%d, failed to open last file:%s, reason:%s", vnode, pVnode->lfn, strerror(errno)); + goto _error; + } + + // verify last file, check size + fstat(pVnode->lfd, &filestat); + if (filestat.st_size < TSDB_FILE_HEADER_LEN) { + dError("vid:%d, last file:%s corrupted", vnode, pVnode->lfn); + taosLogError("vid:%d, last file:%s corrupted", vnode, pVnode->lfn); + goto _error; + } + + // open a new last file + if (noTempLast) { + pVnode->tfd = -1; // do not open temporary last file + } else { + pVnode->tfd = open(pVnode->tfn, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + if (pVnode->tfd < 0) { + dError("vid:%d, failed to open new last file:%s, reason:%s", vnode, pVnode->tfn, strerror(errno)); + taosLogError("vid:%d, failed to open new last file:%s, reason:%s", vnode, pVnode->tfn, strerror(errno)); + goto _error; + } + vnodeCreateFileHeaderFd(pVnode->tfd); + pVnode->lfSize = lseek(pVnode->tfd, 0, SEEK_END); + } + + int size = sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM); + char *temp = malloc(size); + memset(temp, 0, size); + taosCalcChecksumAppend(0, (uint8_t *)temp, size); + write(pVnode->nfd, temp, size); + free(temp); + + pVnode->dfSize = lseek(pVnode->dfd, 0, SEEK_END); + + return 0; + +_error: + if (pVnode->dfd > 0) close(pVnode->dfd); + pVnode->dfd = 0; + + if (pVnode->hfd > 0) close(pVnode->hfd); + pVnode->hfd = 0; + + if (pVnode->nfd > 0) close(pVnode->nfd); + pVnode->nfd = 0; + + if (pVnode->lfd > 0) close(pVnode->lfd); + pVnode->lfd = 0; + + if (pVnode->tfd > 0) close(pVnode->tfd); + pVnode->tfd = 0; + + return -1; +} + +void vnodeRemoveFile(int vnode, int fileId) { + char headName[TSDB_FILENAME_LEN] = "\0"; + char dataName[TSDB_FILENAME_LEN] = "\0"; + char lastName[TSDB_FILENAME_LEN] = "\0"; + char dHeadName[TSDB_FILENAME_LEN] = "\0"; + char dDataName[TSDB_FILENAME_LEN] = "\0"; + char dLastName[TSDB_FILENAME_LEN] = "\0"; + SVnodeObj * pVnode = NULL; + SVnodeHeadInfo headInfo; + + pVnode = vnodeList + vnode; + + vnodeGetHeadDataLname(headName, dataName, lastName, vnode, fileId); + vnodeGetDnameFromLname(headName, dataName, lastName, dHeadName, dDataName, dLastName); + + int fd = open(headName, O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + if (fd > 0) { + vnodeGetHeadFileHeaderInfo(fd, &headInfo); + __sync_fetch_and_add(&(pVnode->vnodeStatistic.totalStorage), -headInfo.totalStorage); + close(fd); + } + + remove(headName); + remove(dataName); + remove(lastName); + remove(dHeadName); + remove(dDataName); + remove(dLastName); + + dTrace("vid:%d fileId:%d on disk: %s is removed, numOfFiles:%d maxFiles:%d", vnode, fileId, tsDirectory, + pVnode->numOfFiles, pVnode->maxFiles); +} + +void vnodeCloseCommitFiles(SVnodeObj *pVnode) { + char dpath[TSDB_FILENAME_LEN] = "\0"; + int fileId; + int ret; + int file_removed = 0; + + close(pVnode->nfd); + pVnode->nfd = 0; + + close(pVnode->hfd); + pVnode->hfd = 0; + + close(pVnode->dfd); + pVnode->dfd = 0; + + close(pVnode->lfd); + pVnode->lfd = 0; + + if (pVnode->tfd > 0) close(pVnode->tfd); + + pthread_mutex_lock(&(pVnode->vmutex)); + + readlink(pVnode->cfn, dpath, TSDB_FILENAME_LEN); + ret = rename(pVnode->nfn, pVnode->cfn); + if (ret < 0) { + dError("vid:%d, failed to rename:%s, reason:%s", pVnode->vnode, pVnode->nfn, strerror(errno)); + } + remove(dpath); + + if (pVnode->tfd > 0) { + memset(dpath, 0, TSDB_FILENAME_LEN); + 
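+    // resolve the old last-file symlink target first, so the stale data file can be removed after the rename below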
readlink(pVnode->lfn, dpath, TSDB_FILENAME_LEN); + ret = rename(pVnode->tfn, pVnode->lfn); + if (ret < 0) { + dError("vid:%d, failed to rename:%s, reason:%s", pVnode->vnode, pVnode->tfn, strerror(errno)); + } + remove(dpath); + } + + pthread_mutex_unlock(&(pVnode->vmutex)); + + pVnode->tfd = 0; + + dTrace("vid:%d, %s and %s is saved", pVnode->vnode, pVnode->cfn, pVnode->lfn); + + if (pVnode->numOfFiles > pVnode->maxFiles) { + fileId = pVnode->fileId - pVnode->numOfFiles + 1; + vnodeRemoveFile(pVnode->vnode, fileId); + pVnode->numOfFiles--; + file_removed = 1; + } + + if (!file_removed) vnodeUpdateFileMagic(pVnode->vnode, pVnode->commitFileId); + vnodeSaveAllMeterObjToFile(pVnode->vnode); + + return; +} + +void vnodeBroadcastStatusToUnsyncedPeer(SVnodeObj *pVnode); + +void *vnodeCommitMultiToFile(SVnodeObj *pVnode, int ssid, int esid) { + int vnode = pVnode->vnode; + SData * data[TSDB_MAX_COLUMNS], *cdata[TSDB_MAX_COLUMNS]; // first 4 bytes are length + char * buffer = NULL, *dmem = NULL, *cmem = NULL, *hmem = NULL, *tmem = NULL; + SMeterObj * pObj = NULL; + SCompInfo compInfo = {0}; + SCompHeader * pHeader; + SMeterInfo * meterInfo = NULL, *pMeter = NULL; + SQuery query; + SColumnFilter colList[TSDB_MAX_COLUMNS] = {0}; + SSqlFunctionExpr pExprs[TSDB_MAX_COLUMNS] = {0}; + int commitAgain; + int headLen, sid, col; + long pointsRead; + long pointsReadLast; + SCompBlock * pCompBlock = NULL; + SVnodeCfg * pCfg = &pVnode->cfg; + TSCKSUM chksum; + SVnodeHeadInfo headInfo; + uint8_t * pOldCompBlocks; + + dPrint("vid:%d, committing to file, firstKey:%ld lastKey:%ld ssid:%d esid:%d", vnode, pVnode->firstKey, + pVnode->lastKey, ssid, esid); + if (pVnode->lastKey == 0) goto _over; + + vnodeRenewCommitLog(vnode); + + // get the MAX consumption buffer for this vnode + int32_t maxBytesPerPoint = 0; + int32_t minBytesPerPoint = INT32_MAX; + for (sid = ssid; sid <= esid; ++sid) { + pObj = (SMeterObj *)(pVnode->meterList[sid]); + if ((pObj == NULL) || (pObj->pCache == NULL)) continue; + + if (maxBytesPerPoint < pObj->bytesPerPoint) { + maxBytesPerPoint = pObj->bytesPerPoint; + } + if (minBytesPerPoint > pObj->bytesPerPoint) { + minBytesPerPoint = pObj->bytesPerPoint; + } + } + + // buffer to hold the temp head + int tcachblocks = pCfg->cacheBlockSize / (minBytesPerPoint * pCfg->rowsInFileBlock); + + int hmsize = + (pCfg->cacheNumOfBlocks.totalBlocks * (MAX(tcachblocks, 1) + 1) + pCfg->maxSessions) * sizeof(SCompBlock); + + // buffer to hold the uncompressed data + int dmsize = + maxBytesPerPoint * pCfg->rowsInFileBlock + (sizeof(SData) + EXTRA_BYTES + sizeof(TSCKSUM)) * TSDB_MAX_COLUMNS; + + // buffer to hold the compressed data + int cmsize = + maxBytesPerPoint * pCfg->rowsInFileBlock + (sizeof(SData) + EXTRA_BYTES + sizeof(TSCKSUM)) * TSDB_MAX_COLUMNS; + + // buffer to hold compHeader + int tmsize = sizeof(SCompHeader) * pCfg->maxSessions + sizeof(TSCKSUM); + + // buffer to hold meterInfo + int misize = pVnode->cfg.maxSessions * sizeof(SMeterInfo); + + int totalSize = hmsize + dmsize + cmsize + misize + tmsize; + buffer = malloc(totalSize); + if (buffer == NULL) { + dError("no enough memory for committing buffer"); + return NULL; + } + + hmem = buffer; + dmem = hmem + hmsize; + cmem = dmem + dmsize; + tmem = cmem + cmsize; + meterInfo = (SMeterInfo *)(tmem + tmsize); + + pthread_mutex_lock(&(pVnode->vmutex)); + pVnode->commitFirstKey = pVnode->firstKey; + pVnode->firstKey = pVnode->lastKey + 1; + pthread_mutex_unlock(&(pVnode->vmutex)); + +_again: + pVnode->commitInProcess = 1; + commitAgain = 0; + 
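+  // clear the temporary head buffer and the commit query descriptor before starting this commit pass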
memset(hmem, 0, totalSize); + memset(&query, 0, sizeof(query)); + + if (vnodeOpenCommitFiles(pVnode, ssid) < 0) goto _over; + dTrace("vid:%d, start to commit, commitFirstKey:%ld commitLastKey:%ld", vnode, pVnode->commitFirstKey, + pVnode->commitLastKey); + + headLen = 0; + vnodeGetHeadFileHeaderInfo(pVnode->hfd, &headInfo); + int maxOldBlocks = 1; + + // read head info + if (pVnode->hfd) { + lseek(pVnode->hfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + if (read(pVnode->hfd, tmem, tmsize) <= 0) { + dError("vid:%d, failed to read old header file:%s", vnode, pVnode->cfn); + taosLogError("vid:%d, failed to read old header file:%s", vnode, pVnode->cfn); + goto _over; + } else { + if (!taosCheckChecksumWhole((uint8_t *)tmem, tmsize)) { + dError("vid:%d, failed to read old header file:%s since comp header offset is broken", + vnode, pVnode->cfn); + taosLogError("vid:%d, failed to read old header file:%s since comp header offset is broken", + vnode, pVnode->cfn); + goto _over; + } + } + } + + // read compInfo + for (sid = 0; sid < pCfg->maxSessions; ++sid) { + pObj = (SMeterObj *)(pVnode->meterList[sid]); + if (pObj == NULL) continue; + + pMeter = meterInfo + sid; + pHeader = ((SCompHeader *)tmem) + sid; + + if (pVnode->hfd > 0) { + if (pHeader->compInfoOffset > 0) { + lseek(pVnode->hfd, pHeader->compInfoOffset, SEEK_SET); + if (read(pVnode->hfd, &compInfo, sizeof(compInfo)) == sizeof(compInfo)) { + if (!taosCheckChecksumWhole((uint8_t *)(&compInfo), sizeof(SCompInfo))) { + dError("vid:%d sid:%d id:%s, failed to read compinfo in file:%s since checksum mismatch", + vnode, sid, pObj->meterId, pVnode->cfn); + taosLogError("vid:%d sid:%d id:%s, failed to read compinfo in file:%s since checksum mismatch", + vnode, sid, pObj->meterId, pVnode->cfn); + goto _over; + } else { + if (pObj->uid == compInfo.uid) { + pMeter->oldNumOfBlocks = compInfo.numOfBlocks; + pMeter->oldCompBlockOffset = pHeader->compInfoOffset + sizeof(SCompInfo); + pMeter->last = compInfo.last; + if (compInfo.numOfBlocks > maxOldBlocks) maxOldBlocks = compInfo.numOfBlocks; + if (pMeter->last) { + lseek(pVnode->hfd, sizeof(SCompBlock) * (compInfo.numOfBlocks - 1), SEEK_CUR); + read(pVnode->hfd, &pMeter->lastBlock, sizeof(SCompBlock)); + } + } else { + dTrace("vid:%d sid:%d id:%s, uid:%ld is not matched w/ old:%ld, old data will be thrown away", + vnode, sid, pObj->meterId, pObj->uid, compInfo.uid); + pMeter->oldNumOfBlocks = 0; + } + } + } else { + dError("vid:%d sid:%d id:%s, failed to read compinfo in file:%s", vnode, sid, pObj->meterId, pVnode->cfn); + goto _over; + } + } + } + } + // Loop To write data to fileId + for (sid = ssid; sid <= esid; ++sid) { + pObj = (SMeterObj *)(pVnode->meterList[sid]); + if ((pObj == NULL) || (pObj->pCache == NULL)) continue; + + data[0] = (SData *)dmem; + cdata[0] = (SData *)cmem; + for (col = 1; col < pObj->numOfColumns; ++col) { + data[col] = (SData *)(((char *)data[col - 1]) + sizeof(SData) + + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes + EXTRA_BYTES + sizeof(TSCKSUM)); + cdata[col] = (SData *)(((char *)cdata[col - 1]) + sizeof(SData) + + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes + EXTRA_BYTES + sizeof(TSCKSUM)); + } + + pMeter = meterInfo + sid; + pMeter->tempHeadOffset = headLen; + + memset(&query, 0, sizeof(query)); + query.colList = colList; + query.pSelectExpr = pExprs; + + query.ekey = pVnode->commitLastKey; + query.skey = pVnode->commitFirstKey; + query.lastKey = query.skey; + + query.sdata = data; + vnodeSetCommitQuery(pObj, &query); + + dTrace("vid:%d sid:%d id:%s, start to 
commit, startKey:%lld slot:%d pos:%d", pObj->vnode, pObj->sid, pObj->meterId, + pObj->lastKeyOnFile, query.slot, query.pos); + + pointsRead = 0; + pointsReadLast = 0; + + // last block is at last file + if (pMeter->last) { + if (pMeter->lastBlock.sversion != pObj->sversion) { + // TODO : Check the correctness of this code. write the last block to + // .data file + pCompBlock = (SCompBlock *)(hmem + headLen); + assert(dmem - (char *)pCompBlock >= sizeof(SCompBlock)); + *pCompBlock = pMeter->lastBlock; + pCompBlock->last = 0; + pCompBlock->offset = lseek(pVnode->dfd, 0, SEEK_END); + lseek(pVnode->lfd, pMeter->lastBlock.offset, SEEK_SET); + sendfile(pVnode->dfd, pVnode->lfd, NULL, pMeter->lastBlock.len); + pVnode->dfSize = pCompBlock->offset + pMeter->lastBlock.len; + + headLen += sizeof(SCompBlock); + pMeter->newNumOfBlocks++; + } else { + // read last block into memory + if (vnodeReadLastBlockToMem(pObj, &pMeter->lastBlock, data) < 0) goto _over; + pointsReadLast = pMeter->lastBlock.numOfPoints; + query.over = 0; + headInfo.totalStorage -= (pointsReadLast * pObj->bytesPerPoint); + dTrace("vid:%d sid:%d id:%s, points:%d in last block will be merged to new block", + pObj->vnode, pObj->sid, pObj->meterId, pointsReadLast); + } + + pMeter->changed = 1; + pMeter->last = 0; + pMeter->oldNumOfBlocks--; + } + + while (query.over == 0) { + pCompBlock = (SCompBlock *)(hmem + headLen); + assert(dmem - (char *)pCompBlock >= sizeof(SCompBlock)); + pointsRead += pointsReadLast; + + while (pointsRead < pObj->pointsPerFileBlock) { + query.pointsToRead = pObj->pointsPerFileBlock - pointsRead; + query.pointsOffset = pointsRead; + pointsRead += vnodeQueryFromCache(pObj, &query); + if (query.over) break; + } + + if (pointsRead == 0) break; + + headInfo.totalStorage += ((pointsRead - pointsReadLast) * pObj->bytesPerPoint); + pCompBlock->last = 1; + if (vnodeWriteBlockToFile(pObj, pCompBlock, data, cdata, pointsRead) < 0) goto _over; + if (pCompBlock->keyLast > pObj->lastKeyOnFile) pObj->lastKeyOnFile = pCompBlock->keyLast; + pMeter->last = pCompBlock->last; + + // write block info into header buffer + headLen += sizeof(SCompBlock); + pMeter->newNumOfBlocks++; + pMeter->committedPoints += (pointsRead - pointsReadLast); + + dTrace("vid:%d sid:%d id:%s, pointsRead:%d, pointsReadLast:%d lastKey:%lld, slot:%d pos:%d newNumOfBlocks:%d headLen:%d", + pObj->vnode, pObj->sid, pObj->meterId, pointsRead, pointsReadLast, pObj->lastKeyOnFile, query.slot, + query.pos, pMeter->newNumOfBlocks, headLen); + + if (pointsRead < pObj->pointsPerFileBlock || query.keyIsMet) break; + + pointsRead = 0; + pointsReadLast = 0; + } + + dTrace("vid:%d sid:%d id:%s, %d points are committed, lastKey:%lld slot:%d pos:%d newNumOfBlocks:%d", + pObj->vnode, pObj->sid, pObj->meterId, pMeter->committedPoints, pObj->lastKeyOnFile, query.slot, query.pos, + pMeter->newNumOfBlocks); + + if (pMeter->committedPoints > 0) { + pMeter->commitSlot = query.slot; + pMeter->commitPos = query.pos; + } + + TSKEY nextKey = 0; + if (pObj->lastKey > pVnode->commitLastKey) + nextKey = pVnode->commitLastKey + 1; + else if (pObj->lastKey > pObj->lastKeyOnFile) + nextKey = pObj->lastKeyOnFile + 1; + + pthread_mutex_lock(&(pVnode->vmutex)); + if (nextKey < pVnode->firstKey && nextKey > 1) pVnode->firstKey = nextKey; + pthread_mutex_unlock(&(pVnode->vmutex)); + } + + if (pVnode->lastKey > pVnode->commitLastKey) commitAgain = 1; + + dTrace("vid:%d, finish appending the data file", vnode); + + // calculate the new compInfoOffset + int compInfoOffset = TSDB_FILE_HEADER_LEN + 
tmsize; + for (sid = 0; sid < pCfg->maxSessions; ++sid) { + pObj = (SMeterObj *)(pVnode->meterList[sid]); + pHeader = ((SCompHeader *)tmem) + sid; + if (pObj == NULL) { + pHeader->compInfoOffset = 0; + continue; + } + + pMeter = meterInfo + sid; + pMeter->compInfoOffset = compInfoOffset; + pMeter->finalNumOfBlocks = pMeter->oldNumOfBlocks + pMeter->newNumOfBlocks; + + if (pMeter->finalNumOfBlocks > 0) { + pHeader->compInfoOffset = pMeter->compInfoOffset; + compInfoOffset += sizeof(SCompInfo) + pMeter->finalNumOfBlocks * sizeof(SCompBlock) + sizeof(TSCKSUM); + } + dTrace("vid:%d sid:%d id:%s, oldBlocks:%d numOfBlocks:%d compInfoOffset:%d", pObj->vnode, pObj->sid, pObj->meterId, + pMeter->oldNumOfBlocks, pMeter->finalNumOfBlocks, compInfoOffset); + } + + // write the comp header into new file + vnodeUpdateHeadFileHeader(pVnode->nfd, &headInfo); + lseek(pVnode->nfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + taosCalcChecksumAppend(0, (uint8_t *)tmem, tmsize); + if (write(pVnode->nfd, tmem, tmsize) <= 0) { + dError("vid:%d sid:%d id:%s, failed to write:%s, error:%s", vnode, sid, pObj->meterId, pVnode->nfn, + strerror(errno)); + goto _over; + } + + pOldCompBlocks = (uint8_t *)malloc(sizeof(SCompBlock) * maxOldBlocks); + + // write the comp block list in new file + for (sid = 0; sid < pCfg->maxSessions; ++sid) { + pObj = (SMeterObj *)(pVnode->meterList[sid]); + if (pObj == NULL) continue; + + pMeter = meterInfo + sid; + if (pMeter->finalNumOfBlocks <= 0) continue; + + compInfo.last = pMeter->last; + compInfo.uid = pObj->uid; + compInfo.numOfBlocks = pMeter->finalNumOfBlocks; + /* compInfo.compBlockLen = pMeter->finalCompBlockLen; */ + compInfo.delimiter = TSDB_VNODE_DELIMITER; + taosCalcChecksumAppend(0, (uint8_t *)(&compInfo), sizeof(SCompInfo)); + lseek(pVnode->nfd, pMeter->compInfoOffset, SEEK_SET); + if (write(pVnode->nfd, &compInfo, sizeof(compInfo)) <= 0) { + dError("vid:%d sid:%d id:%s, failed to write:%s, reason:%s", vnode, sid, pObj->meterId, pVnode->nfn, + strerror(errno)); + goto _over; + } + + // write the old comp blocks + chksum = 0; + if (pVnode->hfd && pMeter->oldNumOfBlocks) { + lseek(pVnode->hfd, pMeter->oldCompBlockOffset, SEEK_SET); + if (pMeter->changed) { + int compBlockLen = pMeter->oldNumOfBlocks * sizeof(SCompBlock); + read(pVnode->hfd, pOldCompBlocks, compBlockLen); + write(pVnode->nfd, pOldCompBlocks, compBlockLen); + chksum = taosCalcChecksum(0, pOldCompBlocks, compBlockLen); + } else { + sendfile(pVnode->nfd, pVnode->hfd, NULL, pMeter->oldNumOfBlocks * sizeof(SCompBlock)); + read(pVnode->hfd, &chksum, sizeof(TSCKSUM)); + } + } + + if (pMeter->newNumOfBlocks) { + chksum = taosCalcChecksum(chksum, (uint8_t *)(hmem + pMeter->tempHeadOffset), + pMeter->newNumOfBlocks * sizeof(SCompBlock)); + if (write(pVnode->nfd, hmem + pMeter->tempHeadOffset, pMeter->newNumOfBlocks * sizeof(SCompBlock)) <= 0) { + dError("vid:%d sid:%d id:%s, failed to write:%s, reason:%s", vnode, sid, pObj->meterId, pVnode->nfn, + strerror(errno)); + goto _over; + } + } + write(pVnode->nfd, &chksum, sizeof(TSCKSUM)); + } + + tfree(pOldCompBlocks); + dTrace("vid:%d, finish writing the new header file:%s", vnode, pVnode->nfn); + vnodeCloseCommitFiles(pVnode); + + for (sid = ssid; sid <= esid; ++sid) { + pObj = (SMeterObj *)(pVnode->meterList[sid]); + if (pObj == NULL) continue; + + pMeter = meterInfo + sid; + if (pMeter->finalNumOfBlocks <= 0) continue; + + if (pMeter->committedPoints > 0) { + vnodeUpdateCommitInfo(pObj, pMeter->commitSlot, pMeter->commitPos, pMeter->commitCount); + } + } + + if (commitAgain) { 
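+    // rows with keys beyond commitLastKey arrived while committing; run another pass for the next file range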
+ pVnode->commitFirstKey = pVnode->commitLastKey + 1; + goto _again; + } + + vnodeRemoveCommitLog(vnode); + +_over: + pVnode->commitInProcess = 0; + vnodeCommitOver(pVnode); + memset(&(vnodeList[vnode].commitThread), 0, sizeof(vnodeList[vnode].commitThread)); + tfree(buffer); + tfree(pOldCompBlocks); + + dPrint("vid:%d, committing is over", vnode); + + return pVnode; +} + +void *vnodeCommitToFile(void *param) { + SVnodeObj *pVnode = (SVnodeObj *)param; + + return vnodeCommitMultiToFile(pVnode, 0, pVnode->cfg.maxSessions - 1); +} + +int vnodeGetCompBlockInfo(SMeterObj *pObj, SQuery *pQuery) { + char prefix[TSDB_FILENAME_LEN]; + char fileName[TSDB_FILENAME_LEN]; + SCompHeader compHeader; + SCompInfo compInfo; + struct stat fstat; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + char * buffer = NULL; + TSCKSUM chksum; + + vnodeFreeFields(pQuery); + tfree(pQuery->pBlock); + + pQuery->numOfBlocks = 0; + SVnodeCfg *pCfg = &vnodeList[pObj->vnode].cfg; + + if (pQuery->hfd > 0) close(pQuery->hfd); + sprintf(prefix, "%s/vnode%d/db/v%df%d", tsDirectory, pObj->vnode, pObj->vnode, pQuery->fileId); + + sprintf(fileName, "%s.head", prefix); + pthread_mutex_lock(&(pVnode->vmutex)); + pQuery->hfd = open(fileName, O_RDONLY); + pthread_mutex_unlock(&(pVnode->vmutex)); + + if (pQuery->hfd < 0) { + dError("vid:%d sid:%d id:%s, failed to open head file:%s, reason:%s", + pObj->vnode, pObj->sid, pObj->meterId, fileName, strerror(errno)); + return -TSDB_CODE_FILE_CORRUPTED; + } + + int tmsize = sizeof(SCompHeader) * pCfg->maxSessions + sizeof(TSCKSUM); + buffer = (char *)calloc(1, tmsize); + if (buffer == NULL) { + dError("vid:%d sid:%d id:%s, failed to allocate memory to buffer", pObj->vnode, pObj->sid, pObj->meterId); + return -TSDB_CODE_APP_ERROR; + } + + lseek(pQuery->hfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + if (read(pQuery->hfd, buffer, tmsize) != tmsize) { + dError("vid:%d sid:%d id:%s, file:%s failed to read comp header, reason:%s", pObj->vnode, pObj->sid, pObj->meterId, + fileName, strerror(errno)); + taosLogError("vid:%d sid:%d id:%s, file:%s failed to read comp header", pObj->vnode, pObj->sid, pObj->meterId, + fileName); + tfree(buffer); + return -TSDB_CODE_FILE_CORRUPTED; + } + + if (!taosCheckChecksumWhole((uint8_t *)buffer, tmsize)) { + dError("vid:%d sid:%d id:%s, file:%s comp header offset is broken", pObj->vnode, pObj->sid, pObj->meterId, + fileName); + taosLogError("vid:%d sid:%d id:%s, file:%s comp header offset is broken", pObj->vnode, pObj->sid, pObj->meterId, + fileName); + tfree(buffer); + return -TSDB_CODE_FILE_CORRUPTED; + } + compHeader = ((SCompHeader *)buffer)[pObj->sid]; + tfree(buffer); + if (compHeader.compInfoOffset == 0) return 0; + + lseek(pQuery->hfd, compHeader.compInfoOffset, SEEK_SET); + read(pQuery->hfd, &compInfo, sizeof(SCompInfo)); + if (!taosCheckChecksumWhole((uint8_t *)(&compInfo), sizeof(SCompInfo))) { + dError("vid:%d sid:%d id:%s, file:%s compInfo checksum mismatch", + pObj->vnode, pObj->sid, pObj->meterId, fileName); + taosLogError("vid:%d sid:%d id:%s, file:%s compInfo checksum mismatch", + pObj->vnode, pObj->sid, pObj->meterId, fileName); + return -TSDB_CODE_FILE_CORRUPTED; + } + if (compInfo.numOfBlocks <= 0) return 0; + if (compInfo.uid != pObj->uid) return 0; + + pQuery->numOfBlocks = compInfo.numOfBlocks; + pQuery->pBlock = (SCompBlock *)calloc(1, (sizeof(SCompBlock) + sizeof(SField *)) * compInfo.numOfBlocks); + pQuery->pFields = (SField **)((char *)pQuery->pBlock + sizeof(SCompBlock) * compInfo.numOfBlocks); + + /* char *pBlock = (char *)pQuery->pBlockFields 
+ sizeof(SCompBlockFields)*compInfo.numOfBlocks; */ + read(pQuery->hfd, pQuery->pBlock, compInfo.numOfBlocks * sizeof(SCompBlock)); + read(pQuery->hfd, &chksum, sizeof(TSCKSUM)); + if (chksum != taosCalcChecksum(0, (uint8_t *)(pQuery->pBlock), + compInfo.numOfBlocks * sizeof(SCompBlock))) { + dError("vid:%d sid:%d id:%s, head file comp block broken, fileId: %d", + pObj->vnode, pObj->sid, pObj->meterId, pQuery->fileId); + taosLogError("vid:%d sid:%d id:%s, head file comp block broken, fileId: %d", + pObj->vnode, pObj->sid, pObj->meterId, pQuery->fileId); + return -TSDB_CODE_FILE_CORRUPTED; + } + + close(pQuery->hfd); + pQuery->hfd = -1; + + sprintf(fileName, "%s.data", prefix); + if (stat(fileName, &fstat) < 0) { + dError("vid:%d sid:%d id:%s, data file:%s not there!", pObj->vnode, + pObj->sid, pObj->meterId, fileName); + return -TSDB_CODE_FILE_CORRUPTED; + } + + if (pQuery->dfd > 0) close(pQuery->dfd); + pQuery->dfd = open(fileName, O_RDONLY); + if (pQuery->dfd < 0) { + dError("vid:%d sid:%d id:%s, failed to open data file:%s, reason:%s", + pObj->vnode, pObj->sid, pObj->meterId, fileName, strerror(errno)); + return -TSDB_CODE_FILE_CORRUPTED; + } + + sprintf(fileName, "%s.last", prefix); + if (stat(fileName, &fstat) < 0) { + dError("vid:%d sid:%d id:%s, last file:%s not there!", pObj->vnode, + pObj->sid, pObj->meterId, fileName); + return -TSDB_CODE_FILE_CORRUPTED; + } + + if (pQuery->lfd > 0) close(pQuery->lfd); + pQuery->lfd = open(fileName, O_RDONLY); + if (pQuery->lfd < 0) { + dError("vid:%d sid:%d id:%s, failed to open last file:%s, reason:%s", + pObj->vnode, pObj->sid, pObj->meterId, fileName, strerror(errno)); + return -TSDB_CODE_FILE_CORRUPTED; + } + + return pQuery->numOfBlocks; +} + +int vnodeReadColumnToMem(int fd, SCompBlock *pBlock, SField **fields, int col, char *data, int dataSize, + char *temp, char *buffer, int bufferSize) { + int len = 0, size = 0; + SField *tfields = NULL; + TSCKSUM chksum = 0; + + if (*fields == NULL) { + size = sizeof(SField) * (pBlock->numOfCols) + sizeof(TSCKSUM); + *fields = (SField *)calloc(1, size); + lseek(fd, pBlock->offset, SEEK_SET); + read(fd, *fields, size); + if (!taosCheckChecksumWhole((uint8_t *)(*fields), size)) { + dError("SField checksum error, col: %d", col); + taosLogError("SField checksum error, col: %d", col); + return -1; + } + } + + tfields = *fields; + + /* If data is NULL, that means only to read SField content. So no need to read data part. 
*/ + if (data == NULL) return 0; + + lseek(fd, pBlock->offset + tfields[col].offset, SEEK_SET); + + if (pBlock->algorithm) { + len = read(fd, temp, tfields[col].len); + read(fd, &chksum, sizeof(TSCKSUM)); + if (chksum != taosCalcChecksum(0, (uint8_t *)temp, tfields[col].len)) { + dError("data column checksum error, col: %d", col); + taosLogError("data column checksum error, col: %d", col); + return -1; + } + + (*pDecompFunc[tfields[col].type])(temp, tfields[col].len, pBlock->numOfPoints, data, dataSize, + pBlock->algorithm, buffer, bufferSize); + + } else { + len = read(fd, data, tfields[col].len); + read(fd, &chksum, sizeof(TSCKSUM)); + if (chksum != taosCalcChecksum(0, (uint8_t *)data, tfields[col].len)) { + dError("data column checksum error, col: %d", col); + taosLogError("data column checksum error, col: %d", col); + return -1; + } + } + + if (len <= 0) { + dError("failed to read col:%d, offset:%ld, reason:%s", col, tfields[col].offset, strerror(errno)); + return -1; + } + + return 0; +} + +int vnodeReadCompBlockToMem(SMeterObj *pObj, SQuery *pQuery, SData *sdata[]) { + char * temp = NULL; + int i = 0, col = 0, code = 0; + SCompBlock *pBlock = NULL; + SField ** pFields = NULL; + char * buffer = NULL; + int bufferSize = 0; + int dfd = pQuery->dfd; + + tfree(pQuery->pFields[pQuery->slot]); + + pBlock = pQuery->pBlock + pQuery->slot; + pFields = pQuery->pFields + pQuery->slot; + temp = malloc(pObj->bytesPerPoint * (pBlock->numOfPoints + 1)); + + if (pBlock->last) dfd = pQuery->lfd; + + if (pBlock->algorithm == TWO_STAGE_COMP) { + bufferSize = pObj->maxBytes * pBlock->numOfPoints + EXTRA_BYTES; + buffer = (char *)calloc(1, bufferSize); + } + + if (pQuery->colList[0].colIdx != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + // load timestamp column first in any cases. 
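+      // the primary timestamp column is always needed to position within the block, so it is loaded even when the query does not select it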
+ code = vnodeReadColumnToMem(dfd, pBlock, pFields, PRIMARYKEY_TIMESTAMP_COL_INDEX, + pQuery->tsData->data + pQuery->pointsOffset * TSDB_KEYSIZE, + TSDB_KEYSIZE*pBlock->numOfPoints, temp, buffer, bufferSize); + col = 1; + } else { + // Read the SField data for this block first, if timestamp column is retrieved in this query, we ignore this process + code = vnodeReadColumnToMem(dfd, pBlock, pFields, 0, NULL, 0, NULL, buffer, bufferSize); + } + + if (code < 0) goto _over; + + while (col < pBlock->numOfCols && i < pQuery->numOfCols) { + SColumnFilterMsg *pColFilterMsg = &pQuery->colList[i].data; + if ((*pFields)[col].colId < pColFilterMsg->colId) { + ++col; + } else if ((*pFields)[col].colId == pColFilterMsg->colId) { + code = vnodeReadColumnToMem(dfd, pBlock, pFields, col, sdata[i]->data, pColFilterMsg->bytes*pBlock->numOfPoints, + temp, buffer, bufferSize); + if (code < 0) goto _over; + ++i; + ++col; + } else { + /* + * pQuery->colList[i].colIdx < (*pFields)[col].colId, this column is not existed in current block, + * fill space with NULL value + */ + char * output = sdata[i]->data; + int32_t bytes = pQuery->colList[i].data.bytes; + int32_t type = pQuery->colList[i].data.type; + + setNullN(output, type, bytes, pBlock->numOfPoints); + ++i; + } + } + + if (col >= pBlock->numOfCols && i < pQuery->numOfCols) { + // remain columns need to set null value + while (i < pQuery->numOfCols) { + char * output = sdata[i]->data; + int32_t bytes = pQuery->colList[i].data.bytes; + int32_t type = pQuery->colList[i].data.type; + + setNullN(output, type, bytes, pBlock->numOfPoints); + ++i; + } + } + +_over: + tfree(buffer); + tfree(temp); + if ( code < 0 ) code = -TSDB_CODE_FILE_CORRUPTED; + return code; +} + +int vnodeReadLastBlockToMem(SMeterObj *pObj, SCompBlock *pBlock, SData *sdata[]) { + char * temp = NULL; + int col = 0, code = 0; + SField *pFields = NULL; + char * buffer = NULL; + int bufferSize = 0; + + SVnodeObj *pVnode = vnodeList + pObj->vnode; + temp = malloc(pObj->bytesPerPoint * (pBlock->numOfPoints + 1)); + if (pBlock->algorithm == TWO_STAGE_COMP) { + bufferSize = pObj->maxBytes*pBlock->numOfPoints+EXTRA_BYTES; + buffer = (char *)calloc(1, pObj->maxBytes * pBlock->numOfPoints + EXTRA_BYTES); + } + + for (col = 0; col < pBlock->numOfCols; ++col) { + code = vnodeReadColumnToMem(pVnode->lfd, pBlock, &pFields, col, sdata[col]->data, + pObj->pointsPerFileBlock*pObj->schema[col].bytes+EXTRA_BYTES, temp, buffer, bufferSize); + if (code < 0) break; + sdata[col]->len = pObj->schema[col].bytes * pBlock->numOfPoints; + } + + tfree(buffer); + tfree(temp); + tfree(pFields); + return code; +} + +int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[], SData *cdata[], int points) { + SVnodeObj *pVnode = &vnodeList[pObj->vnode]; + SVnodeCfg *pCfg = &pVnode->cfg; + int wlen = 0; + SField * fields = NULL; + int size = sizeof(SField) * pObj->numOfColumns + sizeof(TSCKSUM); + int32_t offset = size; + char * buffer = NULL; + int bufferSize = 0; + + int dfd = pVnode->dfd; + + if (pCompBlock->last && (points < pObj->pointsPerFileBlock * tsFileBlockMinPercent)) { + dTrace("vid:%d sid:%d id:%s, points:%d are written to last block, block stime: %ld, block etime: %ld", + pObj->vnode, pObj->sid, pObj->meterId, points, *((TSKEY *)(data[0]->data)), + *((TSKEY * )(data[0]->data + (points - 1) * pObj->schema[0].bytes))); + pCompBlock->last = 1; + dfd = pVnode->tfd > 0 ? 
pVnode->tfd : pVnode->lfd; + } else { + pCompBlock->last = 0; + } + + pCompBlock->offset = lseek(dfd, 0, SEEK_END); + pCompBlock->len = 0; + + fields = (SField *)calloc(1, size); + if (fields == NULL) return -1; + + if (pCfg->compression == TWO_STAGE_COMP){ + bufferSize = pObj->maxBytes * points + EXTRA_BYTES; + buffer = (char *)malloc(bufferSize); + } + + for (int i = 0; i < pObj->numOfColumns; ++i) { + fields[i].colId = pObj->schema[i].colId; + fields[i].type = pObj->schema[i].type; + fields[i].bytes = pObj->schema[i].bytes; + fields[i].offset = offset; + // assert(data[i]->len == points*pObj->schema[i].bytes); + + if (pCfg->compression) { + cdata[i]->len = (*pCompFunc[pObj->schema[i].type])(data[i]->data, points * pObj->schema[i].bytes, points, + cdata[i]->data, pObj->schema[i].bytes*pObj->pointsPerFileBlock+EXTRA_BYTES, + pCfg->compression, buffer, bufferSize); + fields[i].len = cdata[i]->len; + taosCalcChecksumAppend(0, (uint8_t *)(cdata[i]->data), cdata[i]->len + sizeof(TSCKSUM)); + offset += (cdata[i]->len + sizeof(TSCKSUM)); + + } else { + fields[i].len = data[i]->len; + taosCalcChecksumAppend(0, (uint8_t *)(data[i]->data), data[i]->len + sizeof(TSCKSUM)); + offset += (data[i]->len + sizeof(TSCKSUM)); + } + + getStatistics(data[0]->data, data[i]->data, pObj->schema[i].bytes, points, pObj->schema[i].type, &fields[i].min, + &fields[i].max, &fields[i].sum, &fields[i].wsum, &fields[i].numOfNullPoints); + } + + tfree(buffer); + + // Write SField part + taosCalcChecksumAppend(0, (uint8_t *)fields, size); + wlen = write(dfd, fields, size); + if (wlen <= 0) { + tfree(fields); + dError("vid:%d sid:%d id:%s, failed to write block, wlen:%d reason:%s", pObj->vnode, pObj->sid, pObj->meterId, wlen, + strerror(errno)); + return -1; + } + pVnode->vnodeStatistic.compStorage += wlen; + pVnode->dfSize += wlen; + pCompBlock->len += wlen; + tfree(fields); + + // Write data part + for (int i = 0; i < pObj->numOfColumns; ++i) { + if (pCfg->compression) { + wlen = write(dfd, cdata[i]->data, cdata[i]->len + sizeof(TSCKSUM)); + } else { + wlen = write(dfd, data[i]->data, data[i]->len + sizeof(TSCKSUM)); + } + + if (wlen <= 0) { + dError("vid:%d sid:%d id:%s, failed to write block, wlen:%d points:%d reason:%s", + pObj->vnode, pObj->sid, pObj->meterId, wlen, points, strerror(errno)); + return -TSDB_CODE_FILE_CORRUPTED; + } + + pVnode->vnodeStatistic.compStorage += wlen; + pVnode->dfSize += wlen; + pCompBlock->len += wlen; + } + + dTrace("vid: %d vnode compStorage size is: %ld", pObj->vnode, pVnode->vnodeStatistic.compStorage); + + pCompBlock->algorithm = pCfg->compression; + pCompBlock->numOfPoints = points; + pCompBlock->numOfCols = pObj->numOfColumns; + pCompBlock->keyFirst = *((TSKEY *)(data[0]->data)); // hack way to get the key + pCompBlock->keyLast = *((TSKEY *)(data[0]->data + (points - 1) * pObj->schema[0].bytes)); + pCompBlock->sversion = pObj->sversion; + + return 0; +} + +static int forwardInFile(SQuery *pQuery, int32_t midSlot, int32_t step, SVnodeObj *pVnode, SMeterObj *pObj); + +int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { + TSKEY latest, oldest; + int ret = 0; + long delta = 0; + int firstSlot, lastSlot, midSlot; + int numOfBlocks; + char * temp = NULL, *data = NULL; + SCompBlock *pBlock = NULL; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + int step; + char * buffer = NULL; + int bufferSize = 0; + int dfd; + + // if file is broken, pQuery->slot = -2; if not found, pQuery->slot = -1; + + pQuery->slot = -1; + pQuery->pos = -1; + if (pVnode->numOfFiles <= 0) return 0; + + 
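+  // each data file covers daysPerFile days, so the query start key maps directly to a candidate fileId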
SVnodeCfg *pCfg = &pVnode->cfg; + delta = (long)pCfg->daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + latest = pObj->lastKeyOnFile; + oldest = (pVnode->fileId - pVnode->numOfFiles + 1) * delta; + + if (latest < oldest) return 0; + + if (!QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->skey < oldest) return 0; + if (pQuery->ekey > latest) return 0; + if (pQuery->skey > latest) pQuery->skey = latest; + } else { + if (pQuery->skey > latest) return 0; + if (pQuery->ekey < oldest) return 0; + if (pQuery->skey < oldest) pQuery->skey = oldest; + } + + dTrace("vid:%d sid:%d id:%s, skey:%ld ekey:%ld oldest:%ld latest:%ld fileId:%d numOfFiles:%d", + pObj->vnode, pObj->sid, pObj->meterId, pQuery->skey, pQuery->ekey, oldest, latest, pVnode->fileId, + pVnode->numOfFiles); + + step = QUERY_IS_ASC_QUERY(pQuery) ? 1 : -1; + + pQuery->fileId = pQuery->skey / delta; // starting fileId + pQuery->fileId -= step; // hacker way to make while loop below works + + bufferSize = pCfg->rowsInFileBlock*sizeof(TSKEY)+EXTRA_BYTES; + buffer = (char *)calloc(1, bufferSize); + + while (1) { + pQuery->fileId += step; + + if ((pQuery->fileId > pVnode->fileId) || (pQuery->fileId < pVnode->fileId - pVnode->numOfFiles + 1)) { + tfree(buffer); + return 0; + } + + ret = vnodeGetCompBlockInfo(pObj, pQuery); + if (ret == 0) continue; + if (ret < 0) break; // file broken + + pBlock = pQuery->pBlock; + + firstSlot = 0; + lastSlot = pQuery->numOfBlocks - 1; + numOfBlocks = pQuery->numOfBlocks; + if (QUERY_IS_ASC_QUERY(pQuery) && pBlock[lastSlot].keyLast < pQuery->skey) continue; + if (!QUERY_IS_ASC_QUERY(pQuery) && pBlock[firstSlot].keyFirst > pQuery->skey) continue; + + while (1) { + numOfBlocks = lastSlot - firstSlot + 1; + midSlot = (firstSlot + (numOfBlocks >> 1)); + + if (numOfBlocks == 1) break; + + if (pQuery->skey > pBlock[midSlot].keyLast) { + if (numOfBlocks == 2) break; + if (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey < pBlock[midSlot + 1].keyFirst)) break; + firstSlot = midSlot + 1; + } else if (pQuery->skey < pBlock[midSlot].keyFirst) { + if (QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pBlock[midSlot - 1].keyLast)) break; + lastSlot = midSlot - 1; + } else { + break; // got the slot + } + } + + pQuery->slot = midSlot; + if (!QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->skey < pBlock[midSlot].keyFirst) break; + + if (pQuery->ekey > pBlock[midSlot].keyLast) { + pQuery->slot = midSlot + 1; + break; + } + } else { + if (pQuery->skey > pBlock[midSlot].keyLast) { + pQuery->slot = midSlot + 1; + break; + } + + if (pQuery->ekey < pBlock[midSlot].keyFirst) break; + } + + temp = malloc(pObj->pointsPerFileBlock * TSDB_KEYSIZE + EXTRA_BYTES); // only first column + data = malloc(pObj->pointsPerFileBlock * TSDB_KEYSIZE + EXTRA_BYTES); // only first column + dfd = pBlock[midSlot].last ? 
pQuery->lfd : pQuery->dfd; + ret = vnodeReadColumnToMem(dfd, pBlock + midSlot, pQuery->pFields + midSlot, 0, data, + pObj->pointsPerFileBlock*TSDB_KEYSIZE+EXTRA_BYTES, + temp, buffer, bufferSize); + if (ret < 0) { + ret = -TSDB_CODE_FILE_CORRUPTED; + break; + } // file broken + + pQuery->pos = (*vnodeSearchKeyFunc[pObj->searchAlgorithm])(data, pBlock[midSlot].numOfPoints, pQuery->skey, + pQuery->order.order); + pQuery->key = *((TSKEY *)(data + pObj->schema[0].bytes * pQuery->pos)); + + ret = vnodeForwardStartPosition(pQuery, pBlock, midSlot, pVnode, pObj); + break; + } + + tfree(buffer); + tfree(temp); + tfree(data); + + return ret; +} + +int vnodeForwardStartPosition(SQuery *pQuery, SCompBlock *pBlock, int32_t slotIdx, SVnodeObj *pVnode, SMeterObj *pObj) { + int step = QUERY_IS_ASC_QUERY(pQuery) ? 1 : -1; + + if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols == 0) { + int maxReads = QUERY_IS_ASC_QUERY(pQuery) ? pBlock->numOfPoints - pQuery->pos : pQuery->pos + 1; + + if (pQuery->limit.offset < maxReads) { // start position in current block + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->pos += pQuery->limit.offset; + } else { + pQuery->pos -= pQuery->limit.offset; + } + + pQuery->limit.offset = 0; + + } else { + pQuery->limit.offset -= maxReads; + slotIdx += step; + + return forwardInFile(pQuery, slotIdx, step, pVnode, pObj); + } + } + + return pQuery->numOfBlocks; +} + +int forwardInFile(SQuery *pQuery, int32_t slotIdx, int32_t step, SVnodeObj *pVnode, SMeterObj *pObj) { + SCompBlock *pBlock = pQuery->pBlock; + + while (slotIdx < pQuery->numOfBlocks && slotIdx >= 0 && pQuery->limit.offset >= pBlock[slotIdx].numOfPoints) { + pQuery->limit.offset -= pBlock[slotIdx].numOfPoints; + slotIdx += step; + } + + if (slotIdx < pQuery->numOfBlocks && slotIdx >= 0) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->pos = pQuery->limit.offset; + } else { + pQuery->pos = pBlock[slotIdx].numOfPoints - pQuery->limit.offset - 1; + } + pQuery->slot = slotIdx; + pQuery->limit.offset = 0; + + return pQuery->numOfBlocks; + } else { // continue in next file, forward pQuery->limit.offset points + int ret = 0; + pQuery->slot = -1; + pQuery->pos = -1; + + while (1) { + pQuery->fileId += step; + if ((pQuery->fileId > pVnode->fileId) || (pQuery->fileId < pVnode->fileId - pVnode->numOfFiles + 1)) { + pQuery->lastKey = pObj->lastKeyOnFile; + pQuery->skey = pQuery->lastKey + 1; + return 0; + } + + ret = vnodeGetCompBlockInfo(pObj, pQuery); + if (ret == 0) continue; + if (ret > 0) break; // qualified file + } + + if (ret > 0) { + int startSlot = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : pQuery->numOfBlocks - 1; + return forwardInFile(pQuery, startSlot, step, pVnode, pObj); + } else { + return ret; + } + } +} + +static FORCE_INLINE TSKEY vnodeGetTSInDataBlock(SQuery *pQuery, int32_t pos, int32_t factor) { + return *(TSKEY *)(pQuery->tsData->data + (pQuery->pointsOffset * factor + pos) * TSDB_KEYSIZE); +} + +int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { + int numOfReads = 0; + + int lastPos = -1, startPos; + int col, step, code = 0; + char * pRead, *pData; + char * buffer; + SData * sdata[TSDB_MAX_COLUMNS]; + SCompBlock *pBlock = NULL; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + pQuery->pointsRead = 0; + int keyLen = TSDB_KEYSIZE; + + if (pQuery->over) return 0; + + if (pQuery->slot < 0) // it means a new query, we need to find the point first + code = vnodeSearchPointInFile(pObj, pQuery); + + if (code < 0 || pQuery->slot < 0 || pQuery->pos == -1) { + pQuery->over = 1; + return code; + } + + step = QUERY_IS_ASC_QUERY(pQuery) ? -1 : 1; + pBlock = pQuery->pBlock + pQuery->slot; + + if (pQuery->pos == FILE_QUERY_NEW_BLOCK) { + if (!QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->ekey > pBlock->keyLast) pQuery->over = 1; + if (pQuery->skey < pBlock->keyFirst) pQuery->over = 1; + } else { + if (pQuery->ekey < pBlock->keyFirst) pQuery->over = 1; + if (pQuery->skey > pBlock->keyLast) pQuery->over = 1; + } + + pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : pBlock->numOfPoints - 1; + } + + if (pQuery->over) return 0; + + // allocate memory more efficiently + buffer = calloc(1, pQuery->dataRowSize * pBlock->numOfPoints + sizeof(SData) * pQuery->numOfCols); + sdata[0] = (SData *)buffer; + for (col = 1; col < pQuery->numOfCols; ++col) + sdata[col] = + (SData *)(((char *)sdata[col - 1]) + sizeof(SData) + pBlock->numOfPoints * pQuery->colList[col - 1].data.bytes); + + /* + * timestamp column is fetched in any cases. Therefore, if the query does not fetch primary column, + * we allocate tsData buffer with twice size of the other ordinary pQuery->sdata. + * Otherwise, the query function may over-write buffer area while retrieve function has not packed the results into + * message to send to client yet. + * So the startPositionFactor is needed to denote which half part is used to store the result, and which + * part is available for keep data during query process. + * Note: the startPositionFactor must be used in conjunction with + * pQuery->pointsOffset + */ + int32_t startPositionFactor = 1; + if (pQuery->colList[0].colIdx == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + pQuery->tsData = sdata[0]; + startPositionFactor = 0; + } + + code = vnodeReadCompBlockToMem(pObj, pQuery, sdata); + if (code < 0) { + dError("vid:%d sid:%d id:%s, failed to read block:%d numOfPoints:%d", pObj->vnode, pObj->sid, pObj->meterId, + pQuery->slot, pBlock->numOfPoints); + goto _next; + } + + int maxReads = QUERY_IS_ASC_QUERY(pQuery) ? pBlock->numOfPoints - pQuery->pos : pQuery->pos + 1; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + TSKEY endKey = vnodeGetTSInDataBlock(pQuery, pBlock->numOfPoints - 1, startPositionFactor); + if (endKey < pQuery->ekey) { + numOfReads = maxReads; + } else { + lastPos = (*vnodeSearchKeyFunc[pObj->searchAlgorithm])( + pQuery->tsData->data + keyLen * (pQuery->pos + pQuery->pointsOffset * startPositionFactor), maxReads, + pQuery->ekey, TSQL_SO_DESC); + numOfReads = (lastPos >= 0) ? 
lastPos + 1 : 0; + } + } else { + TSKEY startKey = vnodeGetTSInDataBlock(pQuery, 0, startPositionFactor); + if (startKey > pQuery->ekey) { + numOfReads = maxReads; + } else { + lastPos = (*vnodeSearchKeyFunc[pObj->searchAlgorithm])( + pQuery->tsData->data + keyLen * pQuery->pointsOffset * startPositionFactor, maxReads, pQuery->ekey, + TSQL_SO_ASC); + numOfReads = (lastPos >= 0) ? pQuery->pos - lastPos + 1 : 0; + } + } + + if (numOfReads > pQuery->pointsToRead - pQuery->pointsRead) { + numOfReads = pQuery->pointsToRead - pQuery->pointsRead; + } else { + if (lastPos >= 0 || numOfReads == 0) { + pQuery->keyIsMet = 1; + pQuery->over = 1; + } + } + + startPos = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->pos : pQuery->pos - numOfReads + 1; + + int32_t numOfQualifiedPoints = 0; + int32_t numOfActualRead = numOfReads; + + // copy data to result buffer + if (pQuery->numOfFilterCols == 0) { + // no filter condition on ordinary columns + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int16_t colBufferIndex = pQuery->pSelectExpr[i].pBase.colInfo.colIdxInBuf; + int32_t bytes = GET_COLUMN_BYTES(pQuery, i); + + pData = pQuery->sdata[i]->data + pQuery->pointsOffset * bytes; + pRead = sdata[colBufferIndex]->data + startPos * bytes; + + memcpy(pData, pRead, numOfReads * bytes); + } + + numOfQualifiedPoints = numOfReads; + + } else { + // check each data one by one set the input column data + for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { + SColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; + pFilterInfo->pData = sdata[pFilterInfo->pFilter.colIdxInBuf]->data; + } + + int32_t *ids = calloc(1, numOfReads * sizeof(int32_t)); + numOfActualRead = 0; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + for (int32_t j = startPos; j < pBlock->numOfPoints; j -= step) { + TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); + assert(key >= pQuery->skey); + + // out of query range, quit + if (key > pQuery->ekey) { + break; + } + + if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { + continue; + } + + ids[numOfQualifiedPoints] = j; + if (++numOfQualifiedPoints == numOfReads) { + // qualified data are enough + break; + } + } + } else { + for (int32_t j = pQuery->pos; j >= 0; --j) { + TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); + assert(key <= pQuery->skey); + + // out of query range, quit + if (key < pQuery->ekey) { + break; + } + + if (!vnodeFilterData(pQuery, &numOfActualRead, j)) { + continue; + } + + ids[numOfReads - numOfQualifiedPoints - 1] = j; + if (++numOfQualifiedPoints == numOfReads) { + // qualified data are enough + break; + } + } + } + + int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : numOfReads - numOfQualifiedPoints; + for (int32_t j = 0; j < numOfQualifiedPoints; ++j) { + for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { + int16_t colIndexInBuffer = pQuery->pSelectExpr[col].pBase.colInfo.colIdxInBuf; + int32_t bytes = GET_COLUMN_BYTES(pQuery, col); + pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes; + pRead = sdata[colIndexInBuffer]->data + ids[j + start] * bytes; + + memcpy(pData, pRead, bytes); + } + } + + tfree(ids); + assert(numOfQualifiedPoints <= numOfReads); + } + + // Note: numOfQualifiedPoints may be 0, since no data in this block are qualified + assert(pQuery->pointsRead == 0); + + pQuery->pointsRead += numOfQualifiedPoints; + for (col = 0; col < pQuery->numOfOutputCols; ++col) { + int16_t bytes = GET_COLUMN_BYTES(pQuery, col); + pQuery->sdata[col]->len = bytes * (pQuery->pointsOffset + pQuery->pointsRead); + } + pQuery->pos -= numOfActualRead * step; + + // update the lastkey/skey + int32_t lastAccessPos = pQuery->pos + step; + pQuery->lastKey = vnodeGetTSInDataBlock(pQuery, lastAccessPos, startPositionFactor); + pQuery->skey = pQuery->lastKey - step; + +_next: + if ((pQuery->pos < 0 || pQuery->pos >= pBlock->numOfPoints || numOfReads == 0) && (pQuery->over == 0)) { + pQuery->slot = pQuery->slot - step; + pQuery->pos = FILE_QUERY_NEW_BLOCK; + } + + if ((pQuery->slot < 0 || pQuery->slot >= pQuery->numOfBlocks) && (pQuery->over == 0)) { + int ret; + + while (1) { + ret = -1; + pQuery->fileId -= step; // jump to next file + + if (QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->fileId > pVnode->fileId) { + // to do: + // check if file is updated, if updated, open again and check if this Meter is updated + // if meter is updated, read in new block info, and + break; + } + } else { + if ((pVnode->fileId - pQuery->fileId + 1) > pVnode->numOfFiles) break; + } + + ret = vnodeGetCompBlockInfo(pObj, pQuery); + if (ret > 0) break; + if (ret < 0) code = ret; + } + + if (ret <= 0) pQuery->over = 1; + + pQuery->slot = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : pQuery->numOfBlocks - 1; + } + + tfree(buffer); + + return code; +} + +int vnodeUpdateFileMagic(int vnode, int fileId) { + struct stat fstat; + char fileName[256]; + + SVnodeObj *pVnode = vnodeList + vnode; + uint64_t magic = 0; + + vnodeGetHeadDataLname(fileName, NULL, NULL, vnode, fileId); + if (stat(fileName, &fstat) != 0) { + dError("vid:%d, head file:%s is not there", vnode, fileName); + return -1; + } + + int size = sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM) + TSDB_FILE_HEADER_LEN; + if (fstat.st_size < size) { + dError("vid:%d, head file:%s is corrupted", vnode, fileName); + return -1; + } + + if (fstat.st_size == size) return 0; + + vnodeGetHeadDataLname(NULL, fileName, NULL, vnode, fileId); + if (stat(fileName, &fstat) == 0) { + magic = fstat.st_size; + } else { + dError("vid:%d, data file:%s is not there", vnode, fileName); + return -1; + } + + vnodeGetHeadDataLname(NULL, NULL, fileName, vnode, fileId); + if (stat(fileName, &fstat) == 0) { + magic += fstat.st_size; + } + + int slot = fileId % pVnode->maxFiles; + pVnode->fmagic[slot] = magic; + + return 0; +} + +int vnodeInitFile(int vnode) { + int code = 0; + SVnodeObj *pVnode = vnodeList + vnode; + + pVnode->maxFiles = pVnode->cfg.daysToKeep / pVnode->cfg.daysPerFile + 1; + pVnode->maxFile1 = pVnode->cfg.daysToKeep1 / pVnode->cfg.daysPerFile; + pVnode->maxFile2 = pVnode->cfg.daysToKeep2 / pVnode->cfg.daysPerFile; + pVnode->fmagic = (uint64_t *)calloc(pVnode->maxFiles + 1, sizeof(uint64_t)); + int fileId = pVnode->fileId; + + for (int i = 0; i < pVnode->numOfFiles; ++i) { + if (vnodeUpdateFileMagic(vnode, fileId) < 0) { + if (pVnode->cfg.replications > 1) { + pVnode->badFileId = fileId; + } + dError("vid:%d fileId:%d is corrupted", vnode, fileId); + } else { + dTrace("vid:%d fileId:%d is checked", vnode, fileId); + } + + fileId--; + } + + return code; +} + +int vnodeRecoverCompHeader(int vnode, int fileId) { + // TODO: try to recover SCompHeader part + dTrace( + "starting to recover vnode head file comp header part, vnode: %d fileId: " + "%d", + vnode, fileId); + assert(0); + return 0; +} + +int vnodeRecoverHeadFile(int vnode, int fileId) { + // TODO: try to recover SCompHeader part + dTrace("starting to recover vnode head file, vnode: %d, fileId: %d", vnode, fileId); + assert(0); + return 0; +} + +int vnodeRecoverDataFile(int vnode, int fileId) { + // TODO: try to recover SCompHeader part + dTrace("starting to recover vnode data file, vnode: %d, fileId: %d", vnode, fileId); + assert(0); + return 0; +} diff --git a/src/system/src/vnodeFilterFunc.c b/src/system/src/vnodeFilterFunc.c new file mode 100644 index 000000000000..553e19475915 --- /dev/null +++ b/src/system/src/vnodeFilterFunc.c @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "tsqlfunction.h" +#include "vnode.h" +#include "vnodeDataFilterFunc.h" + +bool less_i8(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int8_t *)minval < pFilter->data.upperBndi); +} + +bool less_i16(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int16_t *)minval < pFilter->data.upperBndi); +} + +bool less_i32(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int32_t *)minval < pFilter->data.upperBndi); +} + +bool less_i64(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int64_t *)minval < pFilter->data.upperBndi); +} + +bool less_ds(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(float *)minval < pFilter->data.upperBndd); +} + +bool less_dd(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(double *)minval < pFilter->data.upperBndd); +} + +////////////////////////////////////////////////////////////////// +bool large_i8(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int8_t *)maxval > pFilter->data.lowerBndi); +} + +bool large_i16(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int16_t *)maxval > pFilter->data.lowerBndi); +} + +bool large_i32(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int32_t *)maxval > pFilter->data.lowerBndi); +} + +bool large_i64(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int64_t *)maxval > pFilter->data.lowerBndi); +} + +bool large_ds(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(float *)maxval > pFilter->data.lowerBndd); +} + +bool large_dd(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(double *)maxval > pFilter->data.lowerBndd); +} +///////////////////////////////////////////////////////////////////// + +bool lessEqual_i8(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int8_t *)minval <= pFilter->data.upperBndi); +} + +bool lessEqual_i16(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int16_t *)minval <= pFilter->data.upperBndi); +} + +bool lessEqual_i32(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int32_t *)minval <= pFilter->data.upperBndi); +} + +bool lessEqual_i64(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int64_t *)minval <= pFilter->data.upperBndi); +} + +bool lessEqual_ds(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(float *)minval <= pFilter->data.upperBndd); +} + +bool lessEqual_dd(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(double *)minval <= pFilter->data.upperBndd); +} + +////////////////////////////////////////////////////////////////////////// +bool largeEqual_i8(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int8_t *)maxval >= pFilter->data.lowerBndi); +} + +bool largeEqual_i16(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int16_t *)maxval >= pFilter->data.lowerBndi); +} + +bool largeEqual_i32(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int32_t *)maxval >= pFilter->data.lowerBndi); +} + +bool largeEqual_i64(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(int64_t *)maxval >= pFilter->data.lowerBndi); +} + +bool largeEqual_ds(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(float *)maxval >= pFilter->data.lowerBndd); +} + +bool largeEqual_dd(SColumnFilter *pFilter, char *minval, char *maxval) { + return (*(double *)maxval >= 
pFilter->data.lowerBndd); +} + +//////////////////////////////////////////////////////////////////////// + +bool equal_i8(SColumnFilter *pFilter, char *minval, char *maxval) { + if (*(int8_t *)minval == *(int8_t *)maxval) { + return (*(int8_t *)minval == pFilter->data.lowerBndi); + } else { /* range filter */ + assert(*(int8_t *)minval < *(int8_t *)maxval); + + return *(int8_t *)minval <= pFilter->data.lowerBndi && *(int8_t *)maxval >= pFilter->data.lowerBndi; + } +} + +bool equal_i16(SColumnFilter *pFilter, char *minval, char *maxval) { + if (*(int16_t *)minval == *(int16_t *)maxval) { + return (*(int16_t *)minval == pFilter->data.lowerBndi); + } else { /* range filter */ + assert(*(int16_t *)minval < *(int16_t *)maxval); + + return *(int16_t *)minval <= pFilter->data.lowerBndi && *(int16_t *)maxval >= pFilter->data.lowerBndi; + } +} + +bool equal_i32(SColumnFilter *pFilter, char *minval, char *maxval) { + if (*(int32_t *)minval == *(int32_t *)maxval) { + return (*(int32_t *)minval == pFilter->data.lowerBndi); + } else { /* range filter */ + assert(*(int32_t *)minval < *(int32_t *)maxval); + + return *(int32_t *)minval <= pFilter->data.lowerBndi && *(int32_t *)maxval >= pFilter->data.lowerBndi; + } +} + +bool equal_i64(SColumnFilter *pFilter, char *minval, char *maxval) { + if (*(int64_t *)minval == *(int64_t *)maxval) { + return (*(int64_t *)minval == pFilter->data.lowerBndi); + } else { /* range filter */ + assert(*(int64_t *)minval < *(int64_t *)maxval); + + return *(int64_t *)minval <= pFilter->data.lowerBndi && *(int64_t *)maxval >= pFilter->data.lowerBndi; + } +} + +bool equal_ds(SColumnFilter *pFilter, char *minval, char *maxval) { + if (*(float *)minval == *(float *)maxval) { + return (fabs(*(float *)minval - pFilter->data.lowerBndd) <= FLT_EPSILON); + } else { /* range filter */ + assert(*(float *)minval < *(float *)maxval); + return *(float *)minval <= pFilter->data.lowerBndd && *(float *)maxval >= pFilter->data.lowerBndd; + } +} + +bool equal_dd(SColumnFilter *pFilter, char *minval, char *maxval) { + if (*(double *)minval == *(double *)maxval) { + return (*(double *)minval == pFilter->data.lowerBndd); + } else { /* range filter */ + assert(*(double *)minval < *(double *)maxval); + + return *(double *)minval <= pFilter->data.lowerBndi && *(double *)maxval >= pFilter->data.lowerBndi; + } +} + +bool equal_str(SColumnFilter *pFilter, char *minval, char *maxval) { + // query condition string is greater than the max length of string, not qualified data + if (pFilter->data.len > pFilter->data.bytes) { + return false; + } + + return strncmp((char *)pFilter->data.pz, minval, pFilter->data.bytes) == 0; +} + +//////////////////////////////////////////////////////////////// +bool like_str(SColumnFilter *pFilter, char *minval, char *maxval) { + SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; + + return patternMatch((char *)pFilter->data.pz, minval, pFilter->data.bytes, &info) == TSDB_PATTERN_MATCH; +} + +//////////////////////////////////////////////////////////////// +/** + * If minval equals to maxval, it may serve as the one element filter, + * or all elements of an array are identical during pref-filter stage. + * Otherwise, it must be pre-filter of array list of elements. + * + * During pre-filter stage, if there is one element that locates in [minval, maxval], + * the filter function will return true. 
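+ * For example, a not-equal pre-filter can reject a block only when minval equals maxval, i.e. every value in the block is identical and matches the filter operand.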
+ */
+bool nequal_i8(SColumnFilter *pFilter, char *minval, char *maxval) {
+  if (*(int8_t *)minval == *(int8_t *)maxval) {
+    return (*(int8_t *)minval != pFilter->data.lowerBndi);
+  }
+
+  return true;
+}
+
+bool nequal_i16(SColumnFilter *pFilter, char *minval, char *maxval) {
+  if (*(int16_t *)minval == *(int16_t *)maxval) {
+    return (*(int16_t *)minval != pFilter->data.lowerBndi);
+  }
+
+  return true;
+}
+
+bool nequal_i32(SColumnFilter *pFilter, char *minval, char *maxval) {
+  if (*(int32_t *)minval == *(int32_t *)maxval) {
+    return (*(int32_t *)minval != pFilter->data.lowerBndi);
+  }
+
+  return true;
+}
+
+bool nequal_i64(SColumnFilter *pFilter, char *minval, char *maxval) {
+  if (*(int64_t *)minval == *(int64_t *)maxval) {
+    return (*(int64_t *)minval != pFilter->data.lowerBndi);
+  }
+
+  return true;
+}
+
+bool nequal_ds(SColumnFilter *pFilter, char *minval, char *maxval) {
+  if (*(float *)minval == *(float *)maxval) {
+    return (*(float *)minval != pFilter->data.lowerBndd);
+  }
+
+  return true;
+}
+
+bool nequal_dd(SColumnFilter *pFilter, char *minval, char *maxval) {
+  if (*(double *)minval == *(double *)maxval) {
+    return (*(double *)minval != pFilter->data.lowerBndd);
+  }
+
+  return true;
+}
+
+bool nequal_str(SColumnFilter *pFilter, char *minval, char *maxval) {
+  if (pFilter->data.len > pFilter->data.bytes) {
+    return true;
+  }
+
+  return strncmp((char *)pFilter->data.pz, minval, pFilter->data.bytes) != 0;
+}
+
+////////////////////////////////////////////////////////////////
+
+bool rangeFilter_i32_ii(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int32_t *)minval <= pFilter->data.upperBndi && *(int32_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i32_ee(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int32_t *)minval < pFilter->data.upperBndi && *(int32_t *)maxval > pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i32_ie(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int32_t *)minval < pFilter->data.upperBndi && *(int32_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i32_ei(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int32_t *)minval <= pFilter->data.upperBndi && *(int32_t *)maxval > pFilter->data.lowerBndi);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+bool rangeFilter_i8_ii(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int8_t *)minval <= pFilter->data.upperBndi && *(int8_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i8_ee(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int8_t *)minval < pFilter->data.upperBndi && *(int8_t *)maxval > pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i8_ie(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int8_t *)minval < pFilter->data.upperBndi && *(int8_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i8_ei(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int8_t *)minval <= pFilter->data.upperBndi && *(int8_t *)maxval > pFilter->data.lowerBndi);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+bool rangeFilter_i16_ii(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int16_t *)minval <= pFilter->data.upperBndi && *(int16_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i16_ee(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int16_t *)minval < pFilter->data.upperBndi && *(int16_t *)maxval > pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i16_ie(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int16_t *)minval < pFilter->data.upperBndi && *(int16_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i16_ei(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int16_t *)minval <= pFilter->data.upperBndi && *(int16_t *)maxval > pFilter->data.lowerBndi);
+}
+
+////////////////////////////////////////////////////////////////////////
+bool rangeFilter_i64_ii(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int64_t *)minval <= pFilter->data.upperBndi && *(int64_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i64_ee(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int64_t *)minval < pFilter->data.upperBndi && *(int64_t *)maxval > pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i64_ie(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int64_t *)minval < pFilter->data.upperBndi && *(int64_t *)maxval >= pFilter->data.lowerBndi);
+}
+
+bool rangeFilter_i64_ei(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(int64_t *)minval <= pFilter->data.upperBndi && *(int64_t *)maxval > pFilter->data.lowerBndi);
+}
+
+////////////////////////////////////////////////////////////////////////
+bool rangeFilter_ds_ii(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(float *)minval <= pFilter->data.upperBndd && *(float *)maxval >= pFilter->data.lowerBndd);
+}
+
+bool rangeFilter_ds_ee(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(float *)minval < pFilter->data.upperBndd && *(float *)maxval > pFilter->data.lowerBndd);
+}
+
+bool rangeFilter_ds_ie(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(float *)minval < pFilter->data.upperBndd && *(float *)maxval >= pFilter->data.lowerBndd);
+}
+
+bool rangeFilter_ds_ei(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(float *)minval <= pFilter->data.upperBndd && *(float *)maxval > pFilter->data.lowerBndd);
+}
+
+//////////////////////////////////////////////////////////////////////////
+bool rangeFilter_dd_ii(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(double *)minval <= pFilter->data.upperBndd && *(double *)maxval >= pFilter->data.lowerBndd);
+}
+
+bool rangeFilter_dd_ee(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(double *)minval < pFilter->data.upperBndd && *(double *)maxval > pFilter->data.lowerBndd);
+}
+
+bool rangeFilter_dd_ie(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(double *)minval < pFilter->data.upperBndd && *(double *)maxval >= pFilter->data.lowerBndd);
+}
+
+bool rangeFilter_dd_ei(SColumnFilter *pFilter, char *minval, char *maxval) {
+  return (*(double *)minval <= pFilter->data.upperBndd && *(double *)maxval > pFilter->data.lowerBndd);
+}
+
+////////////////////////////////////////////////////////////////////////////
+bool (*filterFunc_i8[])(SColumnFilter *pFilter, char *minval, char *maxval) = {
+    NULL, less_i8, large_i8, equal_i8, lessEqual_i8, largeEqual_i8, nequal_i8, NULL,
+};
+
+bool (*filterFunc_i16[])(SColumnFilter *pFilter, char *minval, char *maxval) = {
+    NULL, less_i16, large_i16, equal_i16, lessEqual_i16, largeEqual_i16, nequal_i16, NULL,
+};
+
+bool (*filterFunc_i32[])(SColumnFilter *pFilter, char *minval, char *maxval) = {
+    NULL, less_i32, large_i32, equal_i32, lessEqual_i32, largeEqual_i32, nequal_i32, NULL,
+};
+
+bool (*filterFunc_i64[])(SColumnFilter *pFilter, char *minval, char *maxval) = {
+    NULL, less_i64, large_i64, equal_i64, lessEqual_i64, 
largeEqual_i64, nequal_i64, NULL, +}; + +bool (*filterFunc_ds[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, less_ds, large_ds, equal_ds, lessEqual_ds, largeEqual_ds, nequal_ds, NULL, +}; + +bool (*filterFunc_dd[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, less_dd, large_dd, equal_dd, lessEqual_dd, largeEqual_dd, nequal_dd, NULL, +}; + +bool (*filterFunc_str[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, NULL, NULL, equal_str, NULL, NULL, nequal_str, like_str, +}; + +bool (*rangeFilterFunc_i8[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, rangeFilter_i8_ee, rangeFilter_i8_ie, rangeFilter_i8_ei, rangeFilter_i8_ii, +}; + +bool (*rangeFilterFunc_i16[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, rangeFilter_i16_ee, rangeFilter_i16_ie, rangeFilter_i16_ei, rangeFilter_i16_ii, +}; + +bool (*rangeFilterFunc_i32[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, rangeFilter_i32_ee, rangeFilter_i32_ie, rangeFilter_i32_ei, rangeFilter_i32_ii, +}; + +bool (*rangeFilterFunc_i64[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, rangeFilter_i64_ee, rangeFilter_i64_ie, rangeFilter_i64_ei, rangeFilter_i64_ii, +}; + +bool (*rangeFilterFunc_ds[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, rangeFilter_ds_ee, rangeFilter_ds_ie, rangeFilter_ds_ei, rangeFilter_ds_ii, +}; + +bool (*rangeFilterFunc_dd[])(SColumnFilter *pFilter, char *minval, char *maxval) = { + NULL, rangeFilter_dd_ee, rangeFilter_dd_ie, rangeFilter_dd_ei, rangeFilter_dd_ii, +}; + +__filter_func_t *vnodeGetRangeFilterFuncArray(int32_t type) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + return rangeFilterFunc_i8; + case TSDB_DATA_TYPE_TINYINT: + return rangeFilterFunc_i8; + case TSDB_DATA_TYPE_SMALLINT: + return rangeFilterFunc_i16; + case TSDB_DATA_TYPE_INT: + return rangeFilterFunc_i32; + case TSDB_DATA_TYPE_TIMESTAMP: // timestamp uses bigint filter + case TSDB_DATA_TYPE_BIGINT: + return rangeFilterFunc_i64; + case TSDB_DATA_TYPE_FLOAT: + return rangeFilterFunc_ds; + case TSDB_DATA_TYPE_DOUBLE: + return rangeFilterFunc_dd; + default: + return NULL; + } +} + +__filter_func_t *vnodeGetValueFilterFuncArray(int32_t type) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + return filterFunc_i8; + case TSDB_DATA_TYPE_TINYINT: + return filterFunc_i8; + case TSDB_DATA_TYPE_SMALLINT: + return filterFunc_i16; + case TSDB_DATA_TYPE_INT: + return filterFunc_i32; + case TSDB_DATA_TYPE_TIMESTAMP: // timestamp uses bigint filter + case TSDB_DATA_TYPE_BIGINT: + return filterFunc_i64; + case TSDB_DATA_TYPE_FLOAT: + return filterFunc_ds; + case TSDB_DATA_TYPE_DOUBLE: + return filterFunc_dd; + case TSDB_DATA_TYPE_BINARY: + return filterFunc_str; + default: + return NULL; + } +} + +bool vnodeSupportPrefilter(int32_t type) { return type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR; } diff --git a/src/system/src/vnodeImport.c b/src/system/src/vnodeImport.c new file mode 100644 index 000000000000..b81b603a4c9e --- /dev/null +++ b/src/system/src/vnodeImport.c @@ -0,0 +1,936 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include "trpc.h" +#include "ttimer.h" +#include "vnode.h" +#include "vnodeMgmt.h" +#include "vnodeShell.h" +#include "vnodeUtil.h" + +typedef struct { + SCompHeader *headList; + SCompInfo compInfo; + int last; // 0:last block in data file, 1:not the last block + int newBlocks; + int oldNumOfBlocks; + int64_t compInfoOffset; // offset for compInfo in head file + int64_t leftOffset; // copy from this offset to end of head file + int64_t hfdSize; // old head file size +} SHeadInfo; + +typedef struct { + void * signature; + SShellObj *pShell; + SMeterObj *pObj; + int retry; + TSKEY firstKey; + TSKEY lastKey; + int importedRows; + int commit; // start to commit if it is set to 1 + + int slot; // slot/block to start writing the import data + int pos; // pos to start writing the import data in the slot/block + TSKEY key; + + // only for file + int numOfPoints; + int fileId; + int64_t offset; // offset in data file + SData * sdata[TSDB_MAX_COLUMNS]; + char * buffer; + char * payload; + int rows; +} SImportInfo; + +#define EXTRA_BYTES 8 +int vnodeImportData(SMeterObj *pObj, SImportInfo *pImport); + +int vnodeGetImportStartPart(SMeterObj *pObj, char *payload, int rows, TSKEY key1) { + int i; + + for (i = 0; i < rows; ++i) { + TSKEY key = *((TSKEY *)(payload + i * pObj->bytesPerPoint)); + if (key >= key1) break; + } + + return i; +} + +int vnodeGetImportEndPart(SMeterObj *pObj, char *payload, int rows, char **pStart, TSKEY key0) { + int i; + + for (i = 0; i < rows; ++i) { + TSKEY key = *((TSKEY *)(payload + i * pObj->bytesPerPoint)); + if (key > key0) break; + } + + *pStart = payload + i * pObj->bytesPerPoint; + return rows - i; +} + +int vnodeCloseFileForImport(SMeterObj *pObj, SHeadInfo *pHinfo) { + SVnodeObj *pVnode = &vnodeList[pObj->vnode]; + SVnodeCfg *pCfg = &pVnode->cfg; + TSCKSUM chksum = 0; + + assert(pHinfo->newBlocks); + assert(pHinfo->compInfoOffset); + + if (pHinfo->oldNumOfBlocks == 0) write(pVnode->nfd, &chksum, sizeof(TSCKSUM)); + + int leftSize = pHinfo->hfdSize - pHinfo->leftOffset; + if (leftSize > 0) { + lseek(pVnode->hfd, pHinfo->leftOffset, SEEK_SET); + sendfile(pVnode->nfd, pVnode->hfd, NULL, leftSize); + } + + pHinfo->compInfo.numOfBlocks += pHinfo->newBlocks; + int offset = (pHinfo->compInfo.numOfBlocks - pHinfo->oldNumOfBlocks) * sizeof(SCompBlock); + if (pHinfo->oldNumOfBlocks == 0) offset += sizeof(SCompInfo) + sizeof(TSCKSUM); + + pHinfo->headList[pObj->sid].compInfoOffset = pHinfo->compInfoOffset; + for (int sid = pObj->sid + 1; sid < pCfg->maxSessions; ++sid) { + if (pHinfo->headList[sid].compInfoOffset) pHinfo->headList[sid].compInfoOffset += offset; + } + + lseek(pVnode->nfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + int tmsize = sizeof(SCompHeader) * pCfg->maxSessions + sizeof(TSCKSUM); + taosCalcChecksumAppend(0, (uint8_t *)pHinfo->headList, tmsize); + write(pVnode->nfd, pHinfo->headList, tmsize); + + int size = pHinfo->compInfo.numOfBlocks * sizeof(SCompBlock); + char *buffer = malloc(size); + lseek(pVnode->nfd, pHinfo->compInfoOffset + sizeof(SCompInfo), SEEK_SET); + read(pVnode->nfd, buffer, size); + SCompBlock *pBlock = (SCompBlock *)(buffer + (pHinfo->compInfo.numOfBlocks - 1) * 
sizeof(SCompBlock)); + + pHinfo->compInfo.uid = pObj->uid; + pHinfo->compInfo.delimiter = TSDB_VNODE_DELIMITER; + pHinfo->compInfo.last = pBlock->last; + + taosCalcChecksumAppend(0, (uint8_t *)(&pHinfo->compInfo), sizeof(SCompInfo)); + lseek(pVnode->nfd, pHinfo->compInfoOffset, SEEK_SET); + write(pVnode->nfd, &pHinfo->compInfo, sizeof(SCompInfo)); + + chksum = taosCalcChecksum(0, buffer, size); + lseek(pVnode->nfd, pHinfo->compInfoOffset + sizeof(SCompInfo) + size, SEEK_SET); + write(pVnode->nfd, &chksum, sizeof(TSCKSUM)); + free(buffer); + + vnodeCloseCommitFiles(pVnode); + + return 0; +} + +int vnodeProcessLastBlock(SImportInfo *pImport, SHeadInfo *pHinfo, SData *data[]) { + SMeterObj *pObj = pImport->pObj; + SVnodeObj *pVnode = &vnodeList[pObj->vnode]; + SCompBlock lastBlock; + int code = 0; + + if (pHinfo->compInfo.last == 0) return 0; + + // read into memory + uint64_t offset = + pHinfo->compInfoOffset + (pHinfo->compInfo.numOfBlocks - 1) * sizeof(SCompBlock) + sizeof(SCompInfo); + lseek(pVnode->hfd, offset, SEEK_SET); + read(pVnode->hfd, &lastBlock, sizeof(SCompBlock)); + assert(lastBlock.last); + + if (lastBlock.sversion != pObj->sversion) { + lseek(pVnode->lfd, lastBlock.offset, SEEK_SET); + lastBlock.offset = lseek(pVnode->dfd, 0, SEEK_END); + sendfile(pVnode->dfd, pVnode->lfd, NULL, lastBlock.len); + + lastBlock.last = 0; + lseek(pVnode->hfd, offset, SEEK_SET); + write(pVnode->hfd, &lastBlock, sizeof(SCompBlock)); + } else { + vnodeReadLastBlockToMem(pObj, &lastBlock, data); + pHinfo->compInfo.numOfBlocks--; + code = lastBlock.numOfPoints; + pImport->slot--; + } + + return code; +} + +int vnodeOpenFileForImport(SImportInfo *pImport, char *payload, SHeadInfo *pHinfo, SData *data[]) { + SMeterObj * pObj = pImport->pObj; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + SVnodeCfg * pCfg = &pVnode->cfg; + TSKEY firstKey = *((TSKEY *)payload); + struct stat filestat; + int sid, rowsBefore = 0; + + if (pVnode->nfd <= 0 || firstKey > pVnode->commitLastKey) { + if (pVnode->nfd > 0) vnodeCloseFileForImport(pObj, pHinfo); + + pVnode->commitFirstKey = firstKey; + if (vnodeOpenCommitFiles(pVnode, pObj->sid) < 0) return -1; + + fstat(pVnode->hfd, &filestat); + pHinfo->hfdSize = filestat.st_size; + pHinfo->newBlocks = 0; + pHinfo->last = 1; // by default, new blockes are at the end of block list + + lseek(pVnode->hfd, TSDB_FILE_HEADER_LEN, SEEK_SET); + read(pVnode->hfd, pHinfo->headList, sizeof(SCompHeader) * pCfg->maxSessions); + + if (pHinfo->headList[pObj->sid].compInfoOffset > 0) { + lseek(pVnode->hfd, pHinfo->headList[pObj->sid].compInfoOffset, SEEK_SET); + if (read(pVnode->hfd, &pHinfo->compInfo, sizeof(SCompInfo)) != sizeof(SCompInfo)) { + dError("vid:%d sid:%d, failed to read compInfo from file:%s", pObj->vnode, pObj->sid, pVnode->cfn); + return -1; + } + + pHinfo->compInfoOffset = pHinfo->headList[pObj->sid].compInfoOffset; + pHinfo->leftOffset = pHinfo->headList[pObj->sid].compInfoOffset + sizeof(SCompInfo); + } else { + memset(&pHinfo->compInfo, 0, sizeof(SCompInfo)); + pHinfo->compInfo.uid = pObj->uid; + + for (sid = pObj->sid + 1; sid < pCfg->maxSessions; ++sid) + if (pHinfo->headList[sid].compInfoOffset > 0) break; + + pHinfo->compInfoOffset = (sid == pCfg->maxSessions) ? 
pHinfo->hfdSize : pHinfo->headList[sid].compInfoOffset; + pHinfo->leftOffset = pHinfo->compInfoOffset; + } + + pHinfo->oldNumOfBlocks = pHinfo->compInfo.numOfBlocks; + lseek(pVnode->hfd, 0, SEEK_SET); + lseek(pVnode->nfd, 0, SEEK_SET); + sendfile(pVnode->nfd, pVnode->hfd, NULL, pHinfo->compInfoOffset); + write(pVnode->nfd, &pHinfo->compInfo, sizeof(SCompInfo)); + if (pHinfo->headList[pObj->sid].compInfoOffset > 0) lseek(pVnode->hfd, sizeof(SCompInfo), SEEK_CUR); + + if (pVnode->commitFileId < pImport->fileId) { + if (pHinfo->compInfo.numOfBlocks > 0) + pHinfo->leftOffset += pHinfo->compInfo.numOfBlocks * sizeof(SCompBlock) + sizeof(TSCKSUM); + + rowsBefore = vnodeProcessLastBlock(pImport, pHinfo, data); + + // copy all existing compBlockInfo + lseek(pVnode->hfd, pHinfo->compInfoOffset + sizeof(SCompInfo), SEEK_SET); + if (pHinfo->compInfo.numOfBlocks > 0) + sendfile(pVnode->nfd, pVnode->hfd, NULL, pHinfo->compInfo.numOfBlocks * sizeof(SCompBlock)); + + } else if (pVnode->commitFileId == pImport->fileId) { + int slots = pImport->pos ? pImport->slot + 1 : pImport->slot; + pHinfo->leftOffset += slots * sizeof(SCompBlock); + + // check if last block is at last file, if it is, read into memory + if (pImport->pos == 0 && pHinfo->compInfo.numOfBlocks > 0 && pImport->slot == pHinfo->compInfo.numOfBlocks && + pHinfo->compInfo.last) { + rowsBefore = vnodeProcessLastBlock(pImport, pHinfo, data); + } + + // this block will be replaced by new blocks + if (pImport->pos > 0) pHinfo->compInfo.numOfBlocks--; + + if (pImport->slot > 0) { + lseek(pVnode->hfd, pHinfo->compInfoOffset + sizeof(SCompInfo), SEEK_SET); + sendfile(pVnode->nfd, pVnode->hfd, NULL, pImport->slot * sizeof(SCompBlock)); + } + + if (pImport->slot < pHinfo->compInfo.numOfBlocks) + pHinfo->last = 0; // new blocks are not at the end of block list + + } else { + // nothing + + pHinfo->last = 0; // new blocks are not at the end of block list + } + } + + return rowsBefore; +} + +extern int vnodeSendShellSubmitRspMsg(SShellObj *pObj, int code, int numOfPoints); +int vnodeImportToFile(SImportInfo *pImport); + +void vnodeProcessImportTimer(void *param, void *tmrId) { + SImportInfo *pImport = (SImportInfo *)param; + if (pImport == NULL || pImport->signature != param) { + dError("import timer is messed up, signature:%p", pImport); + return; + } + + SMeterObj * pObj = pImport->pObj; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; + SShellObj * pShell = pImport->pShell; + + pImport->retry++; + pObj->state = TSDB_METER_STATE_IMPORTING; + + pthread_mutex_lock(&pPool->vmutex); + if (pPool->commitInProcess || pObj->numOfQueries > 0) { + pthread_mutex_unlock(&pPool->vmutex); + pObj->state = TSDB_METER_STATE_READY; + if (pImport->retry < 1000) { + dTrace("vid:%d sid:%d id:%s, commit in process, try to import later", pObj->vnode, pObj->sid, pObj->meterId); + taosTmrStart(vnodeProcessImportTimer, 10, pImport, vnodeTmrCtrl); + return; + } else { + pShell->code = TSDB_CODE_TOO_SLOW; + } + } else { + pPool->commitInProcess = 1; + pthread_mutex_unlock(&pPool->vmutex); + int code = vnodeImportData(pObj, pImport); + if (pShell) { + pShell->code = code; + pShell->numOfTotalPoints += pImport->importedRows; + } + } + + pObj->state = TSDB_METER_STATE_READY; + pVnode->version++; + + // send response back to shell + if (pShell) { + pShell->count--; + if (pShell->count <= 0) vnodeSendShellSubmitRspMsg(pImport->pShell, pShell->code, pShell->numOfTotalPoints); + } + + pImport->signature = NULL; + 
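// the deferred import has finished and the response has been sent; release the copied payload and the import context +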
free(pImport->payload); + free(pImport); +} + +int vnodeImportToFile(SImportInfo *pImport) { + SMeterObj * pObj = pImport->pObj; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + SVnodeCfg * pCfg = &pVnode->cfg; + SHeadInfo headInfo; + int code = 0, col; + SCompBlock compBlock; + char * payload = pImport->payload; + int rows = pImport->rows; + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; + + TSKEY lastKey = *((TSKEY *)(payload + pObj->bytesPerPoint * (rows - 1))); + TSKEY firstKey = *((TSKEY *)payload); + memset(&headInfo, 0, sizeof(headInfo)); + headInfo.headList = malloc(sizeof(SCompHeader) * pCfg->maxSessions + sizeof(TSCKSUM)); + + SData *cdata[TSDB_MAX_COLUMNS]; + char * buffer1 = + malloc(pObj->bytesPerPoint * pCfg->rowsInFileBlock + (sizeof(SData) + EXTRA_BYTES) * pObj->numOfColumns); + cdata[0] = (SData *)buffer1; + + SData *data[TSDB_MAX_COLUMNS]; + char * buffer2 = + malloc(pObj->bytesPerPoint * pCfg->rowsInFileBlock + (sizeof(SData) + EXTRA_BYTES) * pObj->numOfColumns); + data[0] = (SData *)buffer2; + + for (col = 1; col < pObj->numOfColumns; ++col) { + cdata[col] = (SData *)(((char *)cdata[col - 1]) + sizeof(SData) + EXTRA_BYTES + + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); + data[col] = (SData *)(((char *)data[col - 1]) + sizeof(SData) + EXTRA_BYTES + + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); + } + + int rowsBefore = 0; + int rowsRead = 0; + int rowsUnread = 0; + int leftRows = rows; // left number of rows of imported data + int row, rowsToWrite; + int64_t offset[TSDB_MAX_COLUMNS]; + + if (pImport->pos > 0) { + for (col = 0; col < pObj->numOfColumns; ++col) + memcpy(data[col]->data, pImport->sdata[col]->data, pImport->pos * pObj->schema[col].bytes); + + rowsBefore = pImport->pos; + rowsRead = pImport->pos; + rowsUnread = pImport->numOfPoints - pImport->pos; + } + + dTrace("vid:%d sid:%d id:%s, %d rows data will be imported to file, firstKey:%ld lastKey:%ld", + pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey); + do { + if (leftRows > 0) { + code = vnodeOpenFileForImport(pImport, payload, &headInfo, data); + if (code < 0) goto _exit; + if (code > 0) { + rowsBefore = code; + code = 0; + }; + } else { + // if payload is already imported, rows unread shall still be processed + rowsBefore = 0; + } + + int rowsToProcess = pObj->pointsPerFileBlock - rowsBefore; + if (rowsToProcess > leftRows) rowsToProcess = leftRows; + + for (col = 0; col < pObj->numOfColumns; ++col) { + offset[col] = data[col]->data + rowsBefore * pObj->schema[col].bytes; + } + + row = 0; + if (leftRows > 0) { + for (row = 0; row < rowsToProcess; ++row) { + if (*((TSKEY *)payload) > pVnode->commitLastKey) break; + + for (col = 0; col < pObj->numOfColumns; ++col) { + memcpy(offset[col], payload, pObj->schema[col].bytes); + payload += pObj->schema[col].bytes; + offset[col] += pObj->schema[col].bytes; + } + } + } + + leftRows -= row; + rowsToWrite = rowsBefore + row; + rowsBefore = 0; + + if (leftRows == 0 && rowsUnread > 0) { + // copy the unread + int rowsToCopy = pObj->pointsPerFileBlock - rowsToWrite; + if (rowsToCopy > rowsUnread) rowsToCopy = rowsUnread; + + for (col = 0; col < pObj->numOfColumns; ++col) { + int bytes = pObj->schema[col].bytes; + memcpy(data[col]->data + rowsToWrite * bytes, pImport->sdata[col]->data + rowsRead * bytes, rowsToCopy * bytes); + } + + rowsRead += rowsToCopy; + rowsUnread -= rowsToCopy; + rowsToWrite += rowsToCopy; + } + + for (col = 0; col < pObj->numOfColumns; ++col) { + data[col]->len = rowsToWrite * pObj->schema[col].bytes; 
+ } + + compBlock.last = headInfo.last; + vnodeWriteBlockToFile(pObj, &compBlock, data, cdata, rowsToWrite); + write(pVnode->nfd, &compBlock, sizeof(SCompBlock)); + + rowsToWrite = 0; + headInfo.newBlocks++; + + } while (leftRows > 0 || rowsUnread > 0); + + if (compBlock.keyLast > pObj->lastKeyOnFile) + pObj->lastKeyOnFile = compBlock.keyLast; + + vnodeCloseFileForImport(pObj, &headInfo); + dTrace("vid:%d sid:%d id:%s, %d rows data are imported to file", pObj->vnode, pObj->sid, pObj->meterId, rows); + + SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; + pthread_mutex_lock(&pPool->vmutex); + + if (pInfo->numOfBlocks > 0) { + int slot = (pInfo->currentSlot - pInfo->numOfBlocks + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + TSKEY firstKeyInCache = *((TSKEY *)(pInfo->cacheBlocks[slot]->offset[0])); + + // data may be in commited cache, cache shall be released + if (lastKey > firstKeyInCache) { + while (slot != pInfo->commitSlot) { + SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; + vnodeFreeCacheBlock(pCacheBlock); + slot = (slot + 1 + pInfo->maxBlocks) % pInfo->maxBlocks; + } + + // last slot, the uncommitted slots shall be shifted + SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; + int points = pCacheBlock->numOfPoints - pInfo->commitPoint; + if (points > 0) { + for (int col = 0; col < pObj->numOfColumns; ++col) { + int size = points * pObj->schema[col].bytes; + memmove(pCacheBlock->offset[col], pCacheBlock->offset[col] + pObj->schema[col].bytes * pInfo->commitPoint, size); + } + } + + if (pInfo->commitPoint != pObj->pointsPerBlock) { + // commit point shall be set to 0 if last block is not full + pInfo->commitPoint = 0; + pCacheBlock->numOfPoints = points; + if (slot == pInfo->currentSlot) { + __sync_fetch_and_add(&pObj->freePoints, pInfo->commitPoint); + } + } else { + // if last block is full and committed + SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; + if (pCacheBlock->pMeterObj == pObj) { + vnodeFreeCacheBlock(pCacheBlock); + } + } + } + } + + if (lastKey > pObj->lastKeyOnFile) pObj->lastKeyOnFile = lastKey; + + pthread_mutex_unlock(&pPool->vmutex); + +_exit: + tfree(headInfo.headList); + tfree(buffer1); + tfree(buffer2); + tfree(pImport->buffer); + + return code; +} + +int vnodeImportToCache(SImportInfo *pImport, char *payload, int rows) { + SMeterObj * pObj = pImport->pObj; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + SVnodeCfg * pCfg = &pVnode->cfg; + int code = -1; + SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; + int slot, pos, row, col, points, tpoints; + + char *data[TSDB_MAX_COLUMNS], *current[TSDB_MAX_COLUMNS]; + int slots = pInfo->unCommittedBlocks + 1; + int trows = slots * pObj->pointsPerBlock + rows; // max rows in buffer + int tsize = (trows / pObj->pointsPerBlock + 1) * pCfg->cacheBlockSize; + TSKEY firstKey = *((TSKEY *)payload); + TSKEY lastKey = *((TSKEY *)(payload + pObj->bytesPerPoint * (rows - 1))); + + if (pObj->freePoints < rows || pObj->freePoints < (pObj->pointsPerBlock << 1)) { + dError("vid:%d sid:%d id:%s, import failed, cache is full, freePoints:%d", pObj->vnode, pObj->sid, pObj->meterId, + pObj->freePoints); + pImport->importedRows = 0; + pImport->commit = 1; + code = TSDB_CODE_ACTION_IN_PROGRESS; + return code; + } + + dTrace("vid:%d sid:%d id:%s, %d rows data will be imported to cache, firstKey:%ld lastKey:%ld", + pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey); + + pthread_mutex_lock(&(pVnode->vmutex)); + if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + pthread_mutex_unlock(&(pVnode->vmutex)); + + 
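// stage the imported rows and the cache rows they displace in a temporary buffer, then write them back to the cache blocks in key order +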
char *buffer = malloc(tsize); // buffer to hold unCommitted data plus import data + data[0] = buffer; + current[0] = data[0]; + for (col = 1; col < pObj->numOfColumns; ++col) { + data[col] = data[col - 1] + trows * pObj->schema[col - 1].bytes; + current[col] = data[col]; + } + + // write import data into buffer first + for (row = 0; row < rows; ++row) { + for (col = 0; col < pObj->numOfColumns; ++col) { + memcpy(current[col], payload, pObj->schema[col].bytes); + payload += pObj->schema[col].bytes; + current[col] += pObj->schema[col].bytes; + } + } + + // copy the overwritten data into buffer + tpoints = rows; + pos = pImport->pos; + slot = pImport->slot; + while (1) { + points = pInfo->cacheBlocks[slot]->numOfPoints - pos; + for (col = 0; col < pObj->numOfColumns; ++col) { + int size = points * pObj->schema[col].bytes; + memcpy(current[col], pInfo->cacheBlocks[slot]->offset[col] + pos * pObj->schema[col].bytes, size); + current[col] += size; + } + pos = 0; + tpoints += points; + + if (slot == pInfo->currentSlot) break; + slot = (slot + 1) % pInfo->maxBlocks; + } + + for (col = 0; col < pObj->numOfColumns; ++col) current[col] = data[col]; + pos = pImport->pos; + + // write back to existing slots first + slot = pImport->slot; + while (1) { + points = (tpoints > pObj->pointsPerBlock - pos) ? pObj->pointsPerBlock - pos : tpoints; + SCacheBlock *pCacheBlock = pInfo->cacheBlocks[slot]; + for (col = 0; col < pObj->numOfColumns; ++col) { + int size = points * pObj->schema[col].bytes; + memcpy(pCacheBlock->offset[col] + pos * pObj->schema[col].bytes, current[col], size); + current[col] += size; + } + pCacheBlock->numOfPoints = points + pos; + pos = 0; + tpoints -= points; + + if (slot == pInfo->currentSlot) break; + slot = (slot + 1) % pInfo->maxBlocks; + } + + // allocate new cache block if there are still data left + while (tpoints > 0) { + pImport->commit = vnodeAllocateCacheBlock(pObj); + if (pImport->commit < 0) goto _exit; + points = (tpoints > pObj->pointsPerBlock) ? pObj->pointsPerBlock : tpoints; + SCacheBlock *pCacheBlock = pInfo->cacheBlocks[pInfo->currentSlot]; + for (col = 0; col < pObj->numOfColumns; ++col) { + int size = points * pObj->schema[col].bytes; + memcpy(pCacheBlock->offset[col] + pos * pObj->schema[col].bytes, current[col], size); + current[col] += size; + } + tpoints -= points; + pCacheBlock->numOfPoints = points; + } + + code = 0; + __sync_fetch_and_sub(&pObj->freePoints, rows); + dTrace("vid:%d sid:%d id:%s, %d rows data are imported to cache", pObj->vnode, pObj->sid, pObj->meterId, rows); + +_exit: + free(buffer); + return code; +} + +int vnodeFindKeyInFile(SImportInfo *pImport, int order) { + SMeterObj * pObj = pImport->pObj; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + int code = -1; + SQuery query; + SColumnFilter colList[TSDB_MAX_COLUMNS] = {0}; + + TSKEY key = order ? pImport->firstKey : pImport->lastKey; + memset(&query, 0, sizeof(query)); + query.order.order = order; + query.skey = key; + query.ekey = order ? 
INT64_MAX : 0; + query.colList = colList; + query.numOfCols = pObj->numOfColumns; + + for (int16_t i = 0; i < pObj->numOfColumns; ++i) { + colList[i].data.colId = pObj->schema[i].colId; + colList[i].colIdx = i; + colList[i].colIdxInBuf = i; + } + + int ret = vnodeSearchPointInFile(pObj, &query); + + if (ret >= 0) { + if (query.slot < 0) { + pImport->slot = 0; + pImport->pos = 0; + pImport->key = 0; + pImport->fileId = pVnode->fileId - pVnode->numOfFiles + 1; + dTrace("vid:%d sid:%d id:%s, import to head of file", pObj->vnode, pObj->sid, pObj->meterId); + code = 0; + } else if (query.slot >= 0) { + code = 0; + pImport->slot = query.slot; + pImport->pos = query.pos; + pImport->key = query.key; + pImport->fileId = query.fileId; + SCompBlock *pBlock = &query.pBlock[query.slot]; + pImport->numOfPoints = pBlock->numOfPoints; + + if (pImport->key != key) { + if (order == 0) { + pImport->pos++; + + if (pImport->pos >= pBlock->numOfPoints) { + pImport->slot++; + pImport->pos = 0; + } + } else { + if (pImport->pos < 0) pImport->pos = 0; + } + } + + if (pImport->key != key && pImport->pos > 0) { + if ( pObj->sversion != pBlock->sversion ) { + dError("vid:%d sid:%d id:%s, import sversion not matached, expected:%d received:%d", pObj->vnode, pObj->sid, + pBlock->sversion, pObj->sversion); + code = TSDB_CODE_OTHERS; + } else { + pImport->offset = pBlock->offset; + + pImport->buffer = + malloc(pObj->bytesPerPoint * pVnode->cfg.rowsInFileBlock + sizeof(SData) * pObj->numOfColumns); + pImport->sdata[0] = (SData *)pImport->buffer; + for (int col = 1; col < pObj->numOfColumns; ++col) + pImport->sdata[col] = (SData *)(((char *)pImport->sdata[col - 1]) + sizeof(SData) + + pObj->pointsPerFileBlock * pObj->schema[col - 1].bytes); + + code = vnodeReadCompBlockToMem(pObj, &query, pImport->sdata); + if (code < 0) { + code = -code; + tfree(pImport->buffer); + } + } + } + } + } else { + dError("vid:%d sid:%d id:%s, file is corrupted, import failed", pObj->vnode, pObj->sid, pObj->meterId); + code = -ret; + } + + tclose(query.hfd); + tclose(query.dfd); + tclose(query.lfd); + vnodeFreeFields(&query); + tfree(query.pBlock); + + return code; +} + +int vnodeFindKeyInCache(SImportInfo *pImport, int order) { + SMeterObj * pObj = pImport->pObj; + int code = 0; + SQuery query; + SCacheInfo *pInfo = (SCacheInfo *)pObj->pCache; + + TSKEY key = order ? pImport->firstKey : pImport->lastKey; + memset(&query, 0, sizeof(query)); + query.order.order = order; + query.skey = key; + query.ekey = order ? 
pImport->lastKey : pImport->firstKey; + vnodeSearchPointInCache(pObj, &query); + + if (query.slot < 0) { + pImport->slot = pInfo->commitSlot; + if (pInfo->commitPoint >= pObj->pointsPerBlock) pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; + pImport->pos = 0; + pImport->key = 0; + dTrace("vid:%d sid:%d id:%s, key:%ld, import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); + code = 0; + } else { + pImport->slot = query.slot; + pImport->pos = query.pos; + pImport->key = query.key; + + if (key != query.key) { + if (order == 0) { + // since pos is the position which has smaller key, data shall be imported after it + pImport->pos++; + if (pImport->pos >= pObj->pointsPerBlock) { + pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; + pImport->pos = 0; + } + } else { + if (pImport->pos < 0) pImport->pos = 0; + } + } + code = 0; + } + + return code; +} + +int vnodeImportStartToCache(SImportInfo *pImport, char *payload, int rows) { + int code = 0; + SMeterObj *pObj = pImport->pObj; + + code = vnodeFindKeyInCache(pImport, 1); + if (code != 0) return code; + + if (pImport->key != pImport->firstKey) { + rows = vnodeGetImportStartPart(pObj, payload, rows, pImport->key); + pImport->importedRows = rows; + code = vnodeImportToCache(pImport, payload, rows); + } else { + dError("vid:%d sid:%d id:%s, data is already imported to cache", pObj->vnode, pObj->sid, pObj->meterId); + } + + return code; +} + +int vnodeImportStartToFile(SImportInfo *pImport, char *payload, int rows) { + int code = 0; + SMeterObj *pObj = pImport->pObj; + + code = vnodeFindKeyInFile(pImport, 1); + if (code != 0) return code; + + if (pImport->key != pImport->firstKey) { + pImport->payload = payload; + pImport->rows = vnodeGetImportStartPart(pObj, payload, rows, pImport->key); + pImport->importedRows = pImport->rows; + code = vnodeImportToFile(pImport); + } else { + dError("vid:%d sid:%d id:%s, data is already imported to file", pObj->vnode, pObj->sid, pObj->meterId); + } + + return code; +} + +int vnodeImportWholeToFile(SImportInfo *pImport, char *payload, int rows) { + int code = 0; + SMeterObj *pObj = pImport->pObj; + + code = vnodeFindKeyInFile(pImport, 0); + if (code != 0) return code; + + if (pImport->key != pImport->lastKey) { + pImport->payload = payload; + pImport->rows = vnodeGetImportEndPart(pObj, payload, rows, &pImport->payload, pImport->key); + pImport->importedRows = pImport->rows; + code = vnodeImportToFile(pImport); + } else { + code = vnodeImportStartToFile(pImport, payload, rows); + } + + return code; +} + +int vnodeImportWholeToCache(SImportInfo *pImport, char *payload, int rows) { + int code = 0; + SMeterObj *pObj = pImport->pObj; + + code = vnodeFindKeyInCache(pImport, 0); + if (code != 0) return code; + + if (pImport->key != pImport->lastKey) { + char *pStart; + if ( pImport->key < pObj->lastKeyOnFile ) pImport->key = pObj->lastKeyOnFile; + rows = vnodeGetImportEndPart(pObj, payload, rows, &pStart, pImport->key); + pImport->importedRows = rows; + code = vnodeImportToCache(pImport, pStart, rows); + } else { + if (pImport->firstKey > pObj->lastKeyOnFile) { + code = vnodeImportStartToCache(pImport, payload, rows); + } else if (pImport->firstKey < pObj->lastKeyOnFile) { + code = vnodeImportStartToFile(pImport, payload, rows); + } else { // firstKey == pObj->lastKeyOnFile + dError("vid:%d sid:%d id:%s, data is already there", pObj->vnode, pObj->sid, pObj->meterId); + } + } + + return code; +} + +int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, void *param, int 
sversion, + int *pNumOfPoints) { + SSubmitMsg *pSubmit = (SSubmitMsg *)cont; + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + int rows; + char * payload; + int code = TSDB_CODE_ACTION_IN_PROGRESS; + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; + SShellObj * pShell = (SShellObj *)param; + int pointsImported = 0; + + rows = htons(pSubmit->numOfRows); + int expectedLen = rows * pObj->bytesPerPoint + sizeof(pSubmit->numOfRows); + if (expectedLen != contLen) { + dError("vid:%d sid:%d id:%s, invalid import, expected:%d, contLen:%d", pObj->vnode, pObj->sid, pObj->meterId, + expectedLen, contLen); + return TSDB_CODE_WRONG_MSG_SIZE; + } + + if (sversion != pObj->sversion) { + dError("vid:%d sid:%d id:%s, invalid sversion, expected:%d received:%d", pObj->vnode, pObj->sid, pObj->meterId, + pObj->sversion, sversion); + return TSDB_CODE_OTHERS; + } + + payload = pSubmit->payLoad; + if (pVnode->lastKeyOnFile > pVnode->cfg.daysToKeep * tsMsPerDay[pVnode->cfg.precision] + *((TSKEY *)(payload))) { + dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%lld, data is too old to import, key:%lld", + pObj->vnode, pObj->sid, pObj->meterId, pVnode->lastKeyOnFile, *(TSKEY *)(payload)); + return TSDB_CODE_OTHERS; + } + + if ( pVnode->cfg.commitLog && source != TSDB_DATA_SOURCE_LOG) { + code = vnodeWriteToCommitLog(pObj, TSDB_ACTION_IMPORT, cont, contLen, sversion); + if (code != 0) return code; + } + + if (*((TSKEY *)(pSubmit->payLoad + (rows - 1) * pObj->bytesPerPoint)) > pObj->lastKey) { + code = vnodeInsertPoints(pObj, cont, contLen, TSDB_DATA_SOURCE_LOG, NULL, pObj->sversion, &pointsImported); + if (pShell) { + pShell->code = code; + pShell->numOfTotalPoints += pointsImported; + } + } else { + SImportInfo *pNew, import; + + pObj->state = TSDB_METER_STATE_IMPORTING; + dTrace("vid:%d sid:%d id:%s, import %d rows data", pObj->vnode, pObj->sid, pObj->meterId, rows); + memset(&import, 0, sizeof(import)); + import.firstKey = *((TSKEY *)(payload)); + import.lastKey = *((TSKEY *)(pSubmit->payLoad + (rows - 1) * pObj->bytesPerPoint)); + import.pObj = pObj; + import.pShell = pShell; + import.payload = payload; + import.rows = rows; + + pthread_mutex_lock(&pPool->vmutex); + if (pPool->commitInProcess || pObj->numOfQueries > 0) { + pthread_mutex_unlock(&pPool->vmutex); + pObj->state = TSDB_METER_STATE_READY; + + pNew = (SImportInfo *)malloc(sizeof(SImportInfo)); + memcpy(pNew, &import, sizeof(SImportInfo)); + pNew->signature = pNew; + int payloadLen = contLen - sizeof(SSubmitMsg); + pNew->payload = malloc(payloadLen); + memcpy(pNew->payload, payload, payloadLen); + + dTrace("vid:%d sid:%d id:%s, commit/query:%d in process, import later, ", pObj->vnode, pObj->sid, pObj->meterId, + pObj->numOfQueries); + taosTmrStart(vnodeProcessImportTimer, 10, pNew, vnodeTmrCtrl); + return 0; + } else { + pPool->commitInProcess = 1; + pthread_mutex_unlock(&pPool->vmutex); + int code = vnodeImportData(pObj, &import); + if (pShell) { + pShell->code = code; + pShell->numOfTotalPoints += import.importedRows; + } + } + } + + pObj->state = TSDB_METER_STATE_READY; + pVnode->version++; + + if (pShell) { + pShell->count--; + if (pShell->count <= 0) vnodeSendShellSubmitRspMsg(pShell, pShell->code, pShell->numOfTotalPoints); + } + + return 0; +} + +int vnodeImportData(SMeterObj *pObj, SImportInfo *pImport) { + int code = 0; + + if (pImport->lastKey > pObj->lastKeyOnFile) { + code = vnodeImportWholeToCache(pImport, pImport->payload, pImport->rows); + } else if (pImport->lastKey < pObj->lastKeyOnFile) { + code = vnodeImportWholeToFile(pImport, 
pImport->payload, pImport->rows); + } else { // lastKey == pObj->lastkeyOnFile + code = vnodeImportStartToFile(pImport, pImport->payload, pImport->rows); + } + + SVnodeObj * pVnode = &vnodeList[pObj->vnode]; + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; + pPool->commitInProcess = 0; + + if (pImport->commit) vnodeProcessCommitTimer(pVnode, NULL); + + return code; +} diff --git a/src/system/src/vnodeMeter.c b/src/system/src/vnodeMeter.c new file mode 100644 index 000000000000..9f4b3ec3d89c --- /dev/null +++ b/src/system/src/vnodeMeter.c @@ -0,0 +1,726 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +#include "trpc.h" +#include "tschemautil.h" +#include "ttime.h" +#include "tutil.h" +#include "vnode.h" +#include "vnodeMgmt.h" +#include "vnodeShell.h" +#include "vnodeUtil.h" + +#define VALID_TIMESTAMP(key, curKey, prec) (((key) >= 0) && ((key) <= ((curKey) + 36500 * tsMsPerDay[prec]))) + +int tsMeterSizeOnFile; +void vnodeUpdateMeter(void *param, void *tmdId); +void vnodeRecoverMeterObjectFile(int vnode); + +int (*vnodeProcessAction[])(SMeterObj *, char *, int, char, void *, int, int *) = {vnodeInsertPoints, + vnodeImportPoints}; + +void vnodeFreeMeterObj(SMeterObj *pObj) { + if (pObj == NULL) return; + + dTrace("vid:%d sid:%d id:%s, meter is cleaned up", pObj->vnode, pObj->sid, pObj->meterId); + + vnodeFreeCacheInfo(pObj); + if (vnodeList[pObj->vnode].meterList != NULL) { + vnodeList[pObj->vnode].meterList[pObj->sid] = NULL; + } + tfree(pObj); +} + +int vnodeUpdateVnodeStatistic(FILE *fp, SVnodeObj *pVnode) { + fseek(fp, TSDB_FILE_HEADER_VERSION_SIZE, SEEK_SET); + fwrite(&(pVnode->vnodeStatistic), sizeof(SVnodeStatisticInfo), 1, fp); + + return 0; +} + +void vnodeUpdateVnodeFileHeader(FILE *fp, SVnodeObj *pVnode) { + fseek(fp, TSDB_FILE_HEADER_LEN * 1 / 4, SEEK_SET); + fprintf(fp, "%ld %ld %ld ", pVnode->lastCreate, pVnode->lastRemove, pVnode->version); + fprintf(fp, "%ld %d %d ", pVnode->lastKeyOnFile, pVnode->fileId, pVnode->numOfFiles); +} + +int vnodeCreateMeterObjFile(int vnode) { + FILE * fp; + char fileName[TSDB_FILENAME_LEN]; + int32_t size; + // SMeterObj *pObj; + + sprintf(fileName, "%s/vnode%d/meterObj.v%d", tsDirectory, vnode, vnode); + fp = fopen(fileName, "w+"); + if (fp == NULL) { + dError("failed to create vnode:%d file:%s", vnode, fileName); + return -1; + } else { + vnodeCreateFileHeader(fp); + vnodeUpdateVnodeFileHeader(fp, vnodeList + vnode); + fseek(fp, TSDB_FILE_HEADER_LEN, SEEK_SET); + + size = sizeof(SMeterObjHeader) * vnodeList[vnode].cfg.maxSessions + sizeof(TSCKSUM); + tfree(vnodeList[vnode].meterIndex); + vnodeList[vnode].meterIndex = calloc(1, size); + taosCalcChecksumAppend(0, (uint8_t *)(vnodeList[vnode].meterIndex), size); + fwrite(vnodeList[vnode].meterIndex, size, 1, fp); + + fclose(fp); + } + + return 0; +} + +FILE *vnodeOpenMeterObjFile(int vnode) { + FILE * fp; + char fileName[TSDB_FILENAME_LEN]; + struct stat fstat; + + // check if directory exists + 
sprintf(fileName, "%s/vnode%d", tsDirectory, vnode); + if (stat(fileName, &fstat) < 0) return NULL; + + sprintf(fileName, "%s/vnode%d/meterObj.v%d", tsDirectory, vnode, vnode); + if (stat(fileName, &fstat) < 0) return NULL; + + fp = fopen(fileName, "r+"); + if (fp != NULL) { + if (vnodeCheckFileIntegrity(fp) < 0) { + dError("file:%s is corrupted, need to restore it first", fileName); + fclose(fp); + + // todo: how to recover + exit(1); + } + } else { + dError("failed to open %s, reason:%s", fileName, strerror(errno)); + } + + return fp; +} + +int vnodeSaveMeterObjToFile(SMeterObj *pObj) { + int64_t offset, length, new_length, new_offset; + FILE * fp; + SVnodeObj *pVnode = &vnodeList[pObj->vnode]; + char * buffer = NULL; + + fp = vnodeOpenMeterObjFile(pObj->vnode); + if (fp == NULL) return -1; + + buffer = (char *)malloc(tsMeterSizeOnFile); + if (buffer == NULL) { + dError("Failed to allocate memory while saving meter object to file, meterId", pObj->meterId); + fclose(fp); + return -1; + } + + offset = pVnode->meterIndex[pObj->sid].offset; + length = pVnode->meterIndex[pObj->sid].length; + + new_length = offsetof(SMeterObj, reserved) + pObj->numOfColumns * sizeof(SColumn) + pObj->sqlLen + sizeof(TSCKSUM); + + memcpy(buffer, pObj, offsetof(SMeterObj, reserved)); + memcpy(buffer + offsetof(SMeterObj, reserved), pObj->schema, pObj->numOfColumns * sizeof(SColumn)); + memcpy(buffer + offsetof(SMeterObj, reserved) + pObj->numOfColumns * sizeof(SColumn), pObj->pSql, pObj->sqlLen); + taosCalcChecksumAppend(0, buffer, new_length); + + if (offset == 0 || length < new_length) { // New, append to file end + fseek(fp, 0, SEEK_END); + new_offset = ftell(fp); + fwrite(buffer, new_length, 1, fp); + pVnode->meterIndex[pObj->sid].offset = new_offset; + pVnode->meterIndex[pObj->sid].length = new_length; + } else if (offset < 0) { // deleted meter, append to end of file + fseek(fp, -offset, SEEK_SET); + fwrite(buffer, new_length, 1, fp); + pVnode->meterIndex[pObj->sid].offset = -offset; + pVnode->meterIndex[pObj->sid].length = new_length; + } else { // meter exists, overwrite it, offset > 0 + fseek(fp, offset, SEEK_SET); + fwrite(buffer, new_length, 1, fp); + pVnode->meterIndex[pObj->sid].offset = (pObj->meterId[0] == 0) ? 
-offset : offset; + pVnode->meterIndex[pObj->sid].length = new_length; + } + // taosCalcChecksumAppend(0, pVnode->meterIndex, sizeof(SMeterObjHeader)*pVnode->cfg.maxSessions+sizeof(TSCKSUM)); + // NOTE: no checksum, since it makes creating table slow + fseek(fp, TSDB_FILE_HEADER_LEN + sizeof(SMeterObjHeader) * pObj->sid, SEEK_SET); + fwrite(&(pVnode->meterIndex[pObj->sid]), sizeof(SMeterObjHeader), 1, fp); + // update checksum + // fseek(fp, TSDB_FILE_HEADER_LEN+sizeof(SMeterObjHeader)*(pVnode->cfg.maxSessions), SEEK_SET); + // fwrite(((char *)(pVnode->meterIndex) + sizeof(SMeterObjHeader)*(pVnode->cfg.maxSessions)), sizeof(TSCKSUM), 1, fp); + + tfree(buffer); + + vnodeUpdateVnodeStatistic(fp, pVnode); + vnodeUpdateVnodeFileHeader(fp, pVnode); + /* vnodeUpdateFileCheckSum(fp); */ + fclose(fp); + + return 0; +} + +int vnodeSaveAllMeterObjToFile(int vnode) { + int64_t offset, length, new_length, new_offset; + FILE * fp; + SMeterObj *pObj; + SVnodeObj *pVnode = &vnodeList[vnode]; + char * buffer = NULL; + + fp = vnodeOpenMeterObjFile(vnode); + if (fp == NULL) return -1; + + buffer = (char *)malloc(tsMeterSizeOnFile); + if (buffer == NULL) { + dError("Failed to allocate memory while saving all meter objects to file"); + return -1; + } + + for (int sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { + pObj = pVnode->meterList[sid]; + if (pObj == NULL) continue; + + offset = pVnode->meterIndex[sid].offset; + length = pVnode->meterIndex[sid].length; + + new_length = offsetof(SMeterObj, reserved) + pObj->numOfColumns * sizeof(SColumn) + pObj->sqlLen + sizeof(TSCKSUM); + + memcpy(buffer, pObj, offsetof(SMeterObj, reserved)); + memcpy(buffer + offsetof(SMeterObj, reserved), pObj->schema, pObj->numOfColumns * sizeof(SColumn)); + memcpy(buffer + offsetof(SMeterObj, reserved) + pObj->numOfColumns * sizeof(SColumn), pObj->pSql, pObj->sqlLen); + taosCalcChecksumAppend(0, buffer, new_length); + + if (offset == 0 || length > new_length) { // New, append to file end + new_offset = fseek(fp, 0, SEEK_END); + fwrite(buffer, new_length, 1, fp); + pVnode->meterIndex[sid].offset = new_offset; + pVnode->meterIndex[sid].length = new_length; + } else if (offset < 0) { // deleted meter, append to end of file + fseek(fp, -offset, SEEK_SET); + fwrite(buffer, new_length, 1, fp); + pVnode->meterIndex[sid].offset = -offset; + pVnode->meterIndex[sid].length = new_length; + } else { // meter exists, overwrite it, offset > 0 + fseek(fp, offset, SEEK_SET); + fwrite(buffer, new_length, 1, fp); + pVnode->meterIndex[sid].offset = offset; + pVnode->meterIndex[sid].length = new_length; + } + } + // taosCalcChecksumAppend(0, pVnode->meterIndex, sizeof(SMeterObjHeader)*pVnode->cfg.maxSessions+sizeof(TSCKSUM)); + fseek(fp, TSDB_FILE_HEADER_LEN, SEEK_SET); + fwrite(pVnode->meterIndex, sizeof(SMeterObjHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM), 1, fp); + + tfree(buffer); + + vnodeUpdateVnodeStatistic(fp, pVnode); + vnodeUpdateVnodeFileHeader(fp, pVnode); + /* vnodeUpdateFileCheckSum(fp); */ + fclose(fp); + + return 0; +} + +int vnodeSaveVnodeCfg(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { + FILE *fp; + + fp = vnodeOpenMeterObjFile(vnode); + if (fp == NULL) { + dError("failed to open vnode:%d file", vnode); + return -1; + } + + fseek(fp, TSDB_FILE_HEADER_LEN * 2 / 4, SEEK_SET); + fwrite(pCfg, sizeof(SVnodeCfg), 1, fp); + + char temp[TSDB_FILE_HEADER_LEN / 4]; + memset(temp, 0, sizeof(temp)); + fseek(fp, TSDB_FILE_HEADER_LEN * 3 / 4, SEEK_SET); + fwrite(temp, sizeof(temp), 1, fp); + + if (pCfg->replications >= 1) { + fseek(fp, 
TSDB_FILE_HEADER_LEN * 3 / 4, SEEK_SET); + fwrite(pDesc, sizeof(SVPeerDesc), pCfg->replications, fp); + } + + /* vnodeUpdateFileCheckSum(fp); */ + fclose(fp); + + return 0; +} + +int vnodeSaveVnodeInfo(int vnode) { + FILE * fp; + SVnodeObj *pVnode = &vnodeList[vnode]; + + fp = vnodeOpenMeterObjFile(vnode); + if (fp == NULL) return -1; + + vnodeUpdateVnodeFileHeader(fp, pVnode); + /* vnodeUpdateFileCheckSum(fp); */ + fclose(fp); + + return 0; +} + +int vnodeRestoreMeterObj(char *buffer, int64_t length) { + SMeterObj *pSavedObj, *pObj; + int size; + + pSavedObj = (SMeterObj *)buffer; + if (pSavedObj->vnode < 0 || pSavedObj->vnode >= TSDB_MAX_VNODES) { + dTrace("vid:%d is out of range, corrupted meter obj file", pSavedObj->vnode); + return -1; + } + + SVnodeCfg *pCfg = &vnodeList[pSavedObj->vnode].cfg; + if (pSavedObj->sid < 0 || pSavedObj->sid >= pCfg->maxSessions) { + dTrace("vid:%d, sid:%d is larger than max:%d", pSavedObj->vnode, pSavedObj->sid, pCfg->maxSessions); + return -1; + } + + if (pSavedObj->meterId[0] == 0) return TSDB_CODE_SUCCESS; + + size = sizeof(SMeterObj) + pSavedObj->sqlLen + 1; + pObj = (SMeterObj *)malloc(size); + if (pObj == NULL) { + dError("vid:%d sid:%d, no memory to allocate", pSavedObj->vnode, pSavedObj->sid); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + memcpy(pObj, pSavedObj, offsetof(SMeterObj, reserved)); + vnodeList[pSavedObj->vnode].meterList[pSavedObj->sid] = pObj; + pObj->numOfQueries = 0; + pObj->pCache = vnodeAllocateCacheInfo(pObj); + pObj->pStream = NULL; + pObj->schema = (SColumn *)malloc(pSavedObj->numOfColumns * sizeof(SColumn)); + memcpy(pObj->schema, buffer + offsetof(SMeterObj, reserved), pSavedObj->numOfColumns * sizeof(SColumn)); + pObj->state = TSDB_METER_STATE_READY; + + if (pObj->sqlLen > 0) + memcpy((char *)pObj + sizeof(SMeterObj), + ((char *)pSavedObj) + offsetof(SMeterObj, reserved) + sizeof(SColumn) * pSavedObj->numOfColumns, + pSavedObj->sqlLen); + pObj->pSql = (char *)pObj + sizeof(SMeterObj); + + pObj->lastKey = pObj->lastKeyOnFile; + if (pObj->lastKey > vnodeList[pObj->vnode].lastKey) vnodeList[pObj->vnode].lastKey = pObj->lastKey; + + // taosSetSecurityInfo(pObj->vnode, pObj->sid, pObj->meterId, pObj->spi, pObj->encrypt, pObj->secret, pObj->cipheringKey); + + dTrace("vid:%d sid:%d id:%s, meter is restored, uid:%ld", pObj->vnode, pObj->sid, pObj->meterId, pObj->uid); + return TSDB_CODE_SUCCESS; +} + +int vnodeOpenMetersVnode(int vnode) { + FILE * fp; + char * buffer; + int64_t sid; + int64_t offset, length; + SVnodeObj *pVnode = &vnodeList[vnode]; + + fp = vnodeOpenMeterObjFile(vnode); + if (fp == NULL) return 0; + + fseek(fp, TSDB_FILE_HEADER_VERSION_SIZE, SEEK_SET); + fread(&(pVnode->vnodeStatistic), sizeof(SVnodeStatisticInfo), 1, fp); + + fseek(fp, TSDB_FILE_HEADER_LEN * 1 / 4, SEEK_SET); + fscanf(fp, "%ld %ld %ld ", &(pVnode->lastCreate), &(pVnode->lastRemove), &(pVnode->version)); + fscanf(fp, "%ld %d %d ", &(pVnode->lastKeyOnFile), &(pVnode->fileId), &(pVnode->numOfFiles)); + + fseek(fp, TSDB_FILE_HEADER_LEN * 2 / 4, SEEK_SET); + fread(&pVnode->cfg, sizeof(SVnodeCfg), 1, fp); + if (vnodeIsValidVnodeCfg(&pVnode->cfg) == false) { + pVnode->cfg.maxSessions = 0; // error in vnode file + return 0; + } + + fseek(fp, TSDB_FILE_HEADER_LEN * 3 / 4, SEEK_SET); + fread(&pVnode->vpeers, sizeof(SVPeerDesc), TSDB_VNODES_SUPPORT, fp); + + fseek(fp, TSDB_FILE_HEADER_LEN, SEEK_SET); + + tsMeterSizeOnFile = sizeof(SMeterObj) + TSDB_MAX_COLUMNS * sizeof(SColumn) + TSDB_MAX_SAVED_SQL_LEN + sizeof(TSCKSUM); + + int size = sizeof(SMeterObj *) * 
pVnode->cfg.maxSessions; + pVnode->meterList = (void *)malloc(size); + if (pVnode->meterList == NULL) return -1; + memset(pVnode->meterList, 0, size); + size = sizeof(SMeterObjHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM); + pVnode->meterIndex = (SMeterObjHeader *)calloc(1, size); + if (pVnode->meterIndex == NULL) { + tfree(pVnode->meterList); + return -1; + } + + // Read SMeterObjHeader list from file + if (fread(pVnode->meterIndex, size, 1, fp) < 0) return -1; + // if (!taosCheckChecksumWhole(pVnode->meterIndex, size)) { + // dError("vid: %d meter obj file header is broken since checksum mismatch", vnode); + // return -1; + // } + + // Read the meter object from file and recover the structure + buffer = malloc(tsMeterSizeOnFile); + memset(buffer, 0, tsMeterSizeOnFile); + for (sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { + offset = pVnode->meterIndex[sid].offset; + length = pVnode->meterIndex[sid].length; + if (offset <= 0 || length <= 0) continue; + + fseek(fp, offset, SEEK_SET); + if (fread(buffer, length, 1, fp) <= 0) break; + if (taosCheckChecksumWhole(buffer, length)) { + vnodeRestoreMeterObj(buffer, length - sizeof(TSCKSUM)); + } else { + dError("meter object file is broken since checksum mismatch, vnode: %d sid: %d, try to recover", vnode, sid); + continue; + /* vnodeRecoverMeterObjectFile(vnode); */ + } + } + + tfree(buffer); + fclose(fp); + + return 0; +} + +void vnodeCloseMetersVnode(int vnode) { + SVnodeObj *pVnode = vnodeList + vnode; + SMeterObj *pObj; + + if (pVnode->meterList) { + for (int sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { + pObj = pVnode->meterList[sid]; + if (pObj == NULL) continue; + vnodeFreeCacheInfo(pObj); + tfree(pObj->schema); + tfree(pObj); + } + + tfree(pVnode->meterList); + } + + pVnode->meterList = NULL; +} + +int vnodeCreateMeterObj(SMeterObj *pNew, SConnSec *pSec) { + SMeterObj *pObj; + int code; + + pObj = vnodeList[pNew->vnode].meterList[pNew->sid]; + code = TSDB_CODE_SUCCESS; + + if (pObj && pObj->uid == pNew->uid) { + if (pObj->sversion == pNew->sversion) { + dTrace("vid:%d sid:%d id:%s sversion:%d, identical meterObj, ignore create", pNew->vnode, pNew->sid, + pNew->meterId, pNew->sversion); + return -1; + } + + dTrace("vid:%d sid:%d id:%s, update schema", pNew->vnode, pNew->sid, pNew->meterId); + if (pObj->state != TSDB_METER_STATE_UPDATING) vnodeUpdateMeter(pNew, NULL); + return TSDB_CODE_SUCCESS; + } + + if (pObj) { + dWarn("vid:%d sid:%d id:%s, old meter is there, remove it", pNew->vnode, pNew->sid, pNew->meterId); + vnodeRemoveMeterObj(pNew->vnode, pNew->sid); + } + + pNew->pCache = vnodeAllocateCacheInfo(pNew); + if (pNew->pCache == NULL) { + code = TSDB_CODE_NO_RESOURCE; + } else { + vnodeList[pNew->vnode].meterList[pNew->sid] = pNew; + pNew->state = TSDB_METER_STATE_READY; + if (pNew->timeStamp > vnodeList[pNew->vnode].lastCreate) vnodeList[pNew->vnode].lastCreate = pNew->timeStamp; + vnodeSaveMeterObjToFile(pNew); + // vnodeCreateMeterMgmt(pNew, pSec); + vnodeCreateStream(pNew); + dTrace("vid:%d sid:%d id:%s, meterObj is created, uid:%ld", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); + } + + return code; +} + +int vnodeRemoveMeterObj(int vnode, int sid) { + SMeterObj *pObj; + + if (vnode < 0 || vnode >= TSDB_MAX_VNODES) { + dError("vid:%d is out of range", vnode); + return 0; + } + + SVnodeCfg *pCfg = &vnodeList[vnode].cfg; + if (sid < 0 || sid >= pCfg->maxSessions) { + dError("vid:%d, sid:%d is larger than max:%d or less than 0", vnode, sid, pCfg->maxSessions); + return 0; + } + + // vnode has been closed, no meters 
in this vnode + if (vnodeList[vnode].meterList == NULL) return 0; + + pObj = vnodeList[vnode].meterList[sid]; + if ((pObj == NULL) || (pObj->state == TSDB_METER_STATE_DELETED)) return 0; + if (pObj->state == TSDB_METER_STATE_IMPORTING) return TSDB_CODE_ACTION_IN_PROGRESS; + + int32_t retFlag = 0; + pthread_mutex_lock(&vnodeList[vnode].vmutex); + pObj->state = TSDB_METER_STATE_DELETING; + if (pObj->numOfQueries > 0) { + retFlag = TSDB_CODE_ACTION_IN_PROGRESS; + dWarn("vid:%d sid:%d id:%s %d queries executing on it, wait query to be finished", + vnode, pObj->sid, pObj->meterId, pObj->numOfQueries); + } + pthread_mutex_unlock(&vnodeList[vnode].vmutex); + if (retFlag != 0) return retFlag; + + // after remove this meter, change its stat to DELETED + pObj->state = TSDB_METER_STATE_DELETED; + pObj->timeStamp = taosGetTimestampMs(); + vnodeList[vnode].lastRemove = pObj->timeStamp; + + vnodeRemoveStream(pObj); + pObj->meterId[0] = 0; + vnodeSaveMeterObjToFile(pObj); + vnodeFreeMeterObj(pObj); + + return 0; +} + +int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, void *param, int sversion, + int *numOfInsertPoints) { + int expectedLen, i; + short numOfPoints; + SSubmitMsg *pSubmit = (SSubmitMsg *)cont; + char * pData; + TSKEY tsKey; + int points = 0; + int code = TSDB_CODE_SUCCESS; + SVnodeObj * pVnode = vnodeList + pObj->vnode; + + numOfPoints = htons(pSubmit->numOfRows); + expectedLen = numOfPoints * pObj->bytesPerPoint + sizeof(pSubmit->numOfRows); + if (expectedLen != contLen) { + dError("vid:%d sid:%d id:%s, invalid submit msg length:%d, expected:%d, bytesPerPoint: %d", + pObj->vnode, pObj->sid, pObj->meterId, contLen, expectedLen, pObj->bytesPerPoint); + code = TSDB_CODE_WRONG_MSG_SIZE; + goto _over; + } + + // to guarantee time stamp is the same for all vnodes + pData = pSubmit->payLoad; + tsKey = taosGetTimestamp(pVnode->cfg.precision); + if (*((TSKEY *)pData) == 0) { + for (i = 0; i < numOfPoints; ++i) { + *((TSKEY *)pData) = tsKey++; + pData += pObj->bytesPerPoint; + } + } + + if (numOfPoints >= (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock) { + code = TSDB_CODE_BATCH_SIZE_TOO_BIG; + dError("vid:%d sid:%d id:%s, batch size too big, it shall be smaller than:%d", pObj->vnode, pObj->sid, + pObj->meterId, (pVnode->cfg.blocksPerMeter - 2) * pObj->pointsPerBlock); + return code; + } + + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; + if (pObj->freePoints < numOfPoints || pObj->freePoints < (pObj->pointsPerBlock << 1) || + pPool->notFreeSlots > pVnode->cfg.cacheNumOfBlocks.totalBlocks - 2) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + dTrace("vid:%d sid:%d id:%s, cache is full, freePoints:%d, notFreeSlots:%d", pObj->vnode, pObj->sid, pObj->meterId, + pObj->freePoints, pPool->notFreeSlots); + vnodeProcessCommitTimer(pVnode, NULL); + return TSDB_CODE_ACTION_IN_PROGRESS; + } + + // FIXME: Here should be after the comparison of sversions. 
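+  // Note: as written, the batch is appended to the commit log before the sversion comparison below,
+  // so a submit whose schema version differs from the meter's current one is still logged even though
+  // the insert itself is then deferred; this is what the FIXME above refers to.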
+ if (pVnode->cfg.commitLog && source != TSDB_DATA_SOURCE_LOG) { + code = vnodeWriteToCommitLog(pObj, TSDB_ACTION_INSERT, cont, contLen, sversion); + if (code != 0) return code; + } + + if (pObj->sversion < sversion) { + dTrace("vid:%d sid:%d id:%s, schema is changed, new:%d old:%d", pObj->vnode, pObj->sid, pObj->meterId, sversion, + pObj->sversion); + vnodeSendMeterCfgMsg(pObj->vnode, pObj->sid); + code = TSDB_CODE_ACTION_IN_PROGRESS; + return code; + } + + pData = pSubmit->payLoad; + code = 0; + + TSKEY firstKey = *((TSKEY *)pData); + if (pVnode->lastKeyOnFile > pVnode->cfg.daysToKeep * tsMsPerDay[pVnode->cfg.precision] + firstKey) { + dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%lld, data is too old to insert, key:%lld", pObj->vnode, pObj->sid, + pObj->meterId, pVnode->lastKeyOnFile, firstKey); + return TSDB_CODE_OTHERS; + } + + for (i = 0; i < numOfPoints; ++i) { + if (*((TSKEY *)pData) <= pObj->lastKey) { + dWarn("vid:%d sid:%d id:%s, received key:%ld not larger than lastKey:%ld", pObj->vnode, pObj->sid, pObj->meterId, + *((TSKEY *)pData), pObj->lastKey); + pData += pObj->bytesPerPoint; + continue; + } + + if (!VALID_TIMESTAMP(*((TSKEY *)pData), tsKey, pVnode->cfg.precision)) { + code = TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; + break; + } + + if (vnodeInsertPointToCache(pObj, pData) < 0) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + break; + } + + pObj->lastKey = *((TSKEY *)pData); + pData += pObj->bytesPerPoint; + points++; + } + __sync_fetch_and_add(&(pVnode->vnodeStatistic.pointsWritten), points * (pObj->numOfColumns - 1)); + __sync_fetch_and_add(&(pVnode->vnodeStatistic.totalStorage), points * pObj->bytesPerPoint); + + pthread_mutex_lock(&(pVnode->vmutex)); + + if (pObj->lastKey > pVnode->lastKey) pVnode->lastKey = pObj->lastKey; + + if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + + pVnode->version++; + + pthread_mutex_unlock(&(pVnode->vmutex)); + +_over: + dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%ld source:%d, vnode total storage: %ld", + pObj->vnode, pObj->sid, pObj->meterId, points, numOfPoints, pObj->lastKey, source, + pVnode->vnodeStatistic.totalStorage); + + *numOfInsertPoints = points; + return code; +} + +void vnodeProcessUpdateSchemaTimer(void *param, void *tmrId) { + SMeterObj * pObj = (SMeterObj *)param; + SVnodeObj * pVnode = vnodeList + pObj->vnode; + SCachePool *pPool = (SCachePool *)pVnode->pCachePool; + + pthread_mutex_lock(&pPool->vmutex); + if (pPool->commitInProcess) { + dTrace("vid:%d sid:%d mid:%s, commiting in process, commit later", pObj->vnode, pObj->sid, pObj->meterId); + if (taosTmrStart(vnodeProcessUpdateSchemaTimer, 10, pObj, vnodeTmrCtrl) == NULL) + pObj->state = TSDB_METER_STATE_READY; + pthread_mutex_unlock(&pPool->vmutex); + return; + } + + pPool->commitInProcess = 1; + pthread_mutex_unlock(&pPool->vmutex); + + vnodeCommitMultiToFile(pVnode, pObj->sid, pObj->sid); +} + +void vnodeUpdateMeter(void *param, void *tmrId) { + SMeterObj *pNew = (SMeterObj *)param; + if (pNew == NULL || pNew->vnode < 0 || pNew->sid < 0) return; + + if (vnodeList[pNew->vnode].meterList == NULL) { + dTrace("vid:%d sid:%d id:%s, vnode is deleted, abort update schema", pNew->vnode, pNew->sid, pNew->meterId); + free(pNew->schema); + free(pNew); + return; + } + + SMeterObj *pObj = vnodeList[pNew->vnode].meterList[pNew->sid]; + if (pObj == NULL) { + dTrace("vid:%d sid:%d id:%s, meter is deleted, abort update schema", pNew->vnode, pNew->sid, pNew->meterId); + free(pNew->schema); + free(pNew); + return; + } + + pObj->state = 
TSDB_METER_STATE_UPDATING; + + if (pObj->numOfQueries > 0) { + if (taosTmrStart(vnodeUpdateMeter, 50, pNew, vnodeTmrCtrl) == NULL) { + dError("vid:%d sid:%d id:%s, failed to start update timer", pNew->vnode, pNew->sid, pNew->meterId); + pObj->state = TSDB_METER_STATE_READY; + free(pNew->schema); + free(pNew); + } + + dTrace("vid:%d sid:%d id:%s, there are ongoing queries, update later", pNew->vnode, pNew->sid, pNew->meterId); + return; + } + + // commit first + if (!vnodeIsCacheCommitted(pObj)) { + // commit + if (taosTmrStart(vnodeProcessUpdateSchemaTimer, 0, pObj, vnodeTmrCtrl) == NULL) { + dError("vid:%d sid:%d id:%s, failed to start commit timer", pObj->vnode, pObj->sid, pObj->meterId); + pObj->state = TSDB_METER_STATE_READY; + free(pNew->schema); + free(pNew); + return; + } + + if (taosTmrStart(vnodeUpdateMeter, 50, pNew, vnodeTmrCtrl) == NULL) { + dError("vid:%d sid:%d id:%s, failed to start update timer", pNew->vnode, pNew->sid, pNew->meterId); + pObj->state = TSDB_METER_STATE_READY; + free(pNew->schema); + free(pNew); + } + + dTrace("vid:%d sid:%d meterId:%s, there are data in cache, commit first, update later", + pNew->vnode, pNew->sid, pNew->meterId); + return; + } + + strcpy(pObj->meterId, pNew->meterId); + pObj->numOfColumns = pNew->numOfColumns; + pObj->timeStamp = pNew->timeStamp; + pObj->bytesPerPoint = pNew->bytesPerPoint; + pObj->maxBytes = pNew->maxBytes; + if (pObj->timeStamp > vnodeList[pObj->vnode].lastCreate) vnodeList[pObj->vnode].lastCreate = pObj->timeStamp; + + tfree(pObj->schema); + pObj->schema = pNew->schema; + + vnodeFreeCacheInfo(pObj); + pObj->pCache = vnodeAllocateCacheInfo(pObj); + + pObj->sversion = pNew->sversion; + vnodeSaveMeterObjToFile(pObj); + pObj->state = TSDB_METER_STATE_READY; + + dTrace("vid:%d sid:%d id:%s, schema is updated", pNew->vnode, pNew->sid, pNew->meterId); + free(pNew); +} + +void vnodeRecoverMeterObjectFile(int vnode) { + // TODO: start the recovery process + assert(0); +} diff --git a/src/system/src/vnodeMeterTagMgmt.c b/src/system/src/vnodeMeterTagMgmt.c new file mode 100644 index 000000000000..b31b34ae6ad7 --- /dev/null +++ b/src/system/src/vnodeMeterTagMgmt.c @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include "tsdb.h" + +#include "tlog.h" +#include "tutil.h" + +#include "taosmsg.h" +#include "textbuffer.h" + +#include "tast.h" +#include "vnodeTagMgmt.h" + +#define GET_TAG_VAL_POINTER(s, col, sc, t) ((t *)(&((s)->tags[(sc)->colOffset[(col)]]))) +#define GET_TAG_VAL(s, col, sc, t) (*GET_TAG_VAL_POINTER(s, col, sc, t)) + +static void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *pOrder); + +static void tSidSetDisplay(tSidSet *pSets); + +// todo merge with losertree_compar/ext_comp +int32_t doCompare(char *f1, char *f2, int32_t type, int32_t size) { + switch (type) { + case TSDB_DATA_TYPE_INT: + DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2)); + case TSDB_DATA_TYPE_DOUBLE: + DEFAULT_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2)); + case TSDB_DATA_TYPE_FLOAT: + DEFAULT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2)); + case TSDB_DATA_TYPE_BIGINT: + DEFAULT_COMP(GET_INT64_VAL(f1), GET_INT64_VAL(f2)); + case TSDB_DATA_TYPE_SMALLINT: + DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2)); + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_BOOL: + DEFAULT_COMP(GET_INT8_VAL(f1), GET_INT8_VAL(f2)); + case TSDB_DATA_TYPE_NCHAR: { + int32_t ret = wcsncmp((wchar_t *)f1, (wchar_t *)f2, size / TSDB_NCHAR_SIZE); + if (ret == 0) { + return ret; + } + + return (ret < 0) ? -1 : 1; + } + default: { + int32_t ret = strncmp(f1, f2, (size_t)size); + if (ret == 0) { + return ret; + } + + return (ret < 0) ? -1 : 1; + } + } +} + +int32_t meterSidComparator(const void *p1, const void *p2, void *param) { + tOrderDescriptor *pOrderDesc = (tOrderDescriptor *)param; + + SMeterSidExtInfo *s1 = (SMeterSidExtInfo *)p1; + SMeterSidExtInfo *s2 = (SMeterSidExtInfo *)p2; + + for (int32_t i = 0; i < pOrderDesc->orderIdx.numOfOrderedCols; ++i) { + int32_t colIdx = pOrderDesc->orderIdx.pData[i]; + + char * f1 = NULL; + char * f2 = NULL; + int32_t type = 0; + int32_t bytes = 0; + + if (colIdx == -1) { + f1 = s1->tags; + f2 = s2->tags; + type = TSDB_DATA_TYPE_BINARY; + bytes = TSDB_METER_NAME_LEN; + } else { + f1 = GET_TAG_VAL_POINTER(s1, colIdx, pOrderDesc->pTagSchema, char); + f2 = GET_TAG_VAL_POINTER(s2, colIdx, pOrderDesc->pTagSchema, char); + SSchema *pSchema = &pOrderDesc->pTagSchema->pSchema[colIdx]; + type = pSchema->type; + bytes = pSchema->bytes; + } + + int32_t ret = doCompare(f1, f2, type, bytes); + if (ret == 0) { + continue; + } else { + return ret; + } + } + + return 0; +} + +static void median(void **pMeterSids, size_t size, int32_t s1, int32_t s2, tOrderDescriptor *pOrderDesc, + __ext_compar_fn_t compareFn) { + int32_t midIdx = ((s2 - s1) >> 1) + s1; + + if (compareFn(pMeterSids[midIdx], pMeterSids[s1], pOrderDesc) == 1) { + tsDataSwap(&pMeterSids[midIdx], &pMeterSids[s1], TSDB_DATA_TYPE_BINARY, size); + } + + if (compareFn(pMeterSids[midIdx], pMeterSids[s2], pOrderDesc) == 1) { + tsDataSwap(&pMeterSids[midIdx], &pMeterSids[s1], TSDB_DATA_TYPE_BINARY, size); + tsDataSwap(&pMeterSids[midIdx], &pMeterSids[s2], TSDB_DATA_TYPE_BINARY, size); + } else if (compareFn(pMeterSids[s1], pMeterSids[s2], pOrderDesc) == 1) { + tsDataSwap(&pMeterSids[s1], &pMeterSids[s2], TSDB_DATA_TYPE_BINARY, size); + } + + assert(compareFn(pMeterSids[midIdx], pMeterSids[s1], pOrderDesc) <= 0 && + compareFn(pMeterSids[s1], pMeterSids[s2], pOrderDesc) <= 0); + +#ifdef _DEBUG_VIEW + tTagsPrints(pMeterSids[s1], pOrderDesc->pTagSchema, &pOrderDesc->orderIdx); + tTagsPrints(pMeterSids[midIdx], pOrderDesc->pTagSchema, &pOrderDesc->orderIdx); + tTagsPrints(pMeterSids[s2], 
pOrderDesc->pTagSchema, &pOrderDesc->orderIdx); +#endif +} + +static void tInsertSort(void **pMeterSids, size_t size, int32_t startPos, int32_t endPos, void *param, + __ext_compar_fn_t compareFn) { + for (int32_t i = startPos + 1; i <= endPos; ++i) { + for (int32_t j = i; j > startPos; --j) { + if (compareFn(pMeterSids[j], pMeterSids[j - 1], param) == -1) { + tsDataSwap(&pMeterSids[j], &pMeterSids[j - 1], TSDB_DATA_TYPE_BINARY, size); + } else { + break; + } + } + } +} + +void tQSortEx(void **pMeterSids, size_t size, int32_t start, int32_t end, void *param, __ext_compar_fn_t compareFn) { + tOrderDescriptor *pOrderDesc = (tOrderDescriptor *)param; + + // short array sort, incur another sort procedure instead of quick sort process + if (end - start + 1 <= 8) { + tInsertSort(pMeterSids, size, start, end, pOrderDesc, compareFn); + return; + } + + median(pMeterSids, size, start, end, pOrderDesc, compareFn); + + int32_t s = start, e = end; + int32_t endRightS = end, startLeftS = start; + + while (s < e) { + while (e > s) { + int32_t ret = compareFn(pMeterSids[e], pMeterSids[s], pOrderDesc); + if (ret < 0) { + break; + } + + /* + * move the data that equals to pivotal value to the right end of the list + */ + if (ret == 0 && e != endRightS) { + tsDataSwap(&pMeterSids[e], &pMeterSids[endRightS--], TSDB_DATA_TYPE_BINARY, size); + } + + e--; + } + + if (e != s) { + tsDataSwap(&pMeterSids[e], &pMeterSids[s], TSDB_DATA_TYPE_BINARY, size); + } + + while (s < e) { + int32_t ret = compareFn(pMeterSids[s], pMeterSids[e], pOrderDesc); + if (ret > 0) { + break; + } + + if (ret == 0 && s != startLeftS) { + tsDataSwap(&pMeterSids[s], &pMeterSids[startLeftS++], TSDB_DATA_TYPE_BINARY, size); + } + s++; + } + + if (e != s) { + tsDataSwap(&pMeterSids[s], &pMeterSids[e], TSDB_DATA_TYPE_BINARY, size); + } + } + + int32_t rightPartStart = e + 1; + if (endRightS != end && e < end) { + int32_t left = rightPartStart; + int32_t right = end; + + while (right > endRightS && left <= endRightS) { + tsDataSwap(&pMeterSids[left++], &pMeterSids[right--], TSDB_DATA_TYPE_BINARY, size); + } + + rightPartStart += (end - endRightS); + } + + int32_t leftPartEnd = e - 1; + if (startLeftS != end && s > start) { + int32_t left = start; + int32_t right = leftPartEnd; + + while (left < startLeftS && right >= startLeftS) { + tsDataSwap(&pMeterSids[left++], &pMeterSids[right--], TSDB_DATA_TYPE_BINARY, size); + } + + leftPartEnd -= (startLeftS - start); + } + + if (leftPartEnd > start) { + tQSortEx(pMeterSids, size, start, leftPartEnd, pOrderDesc, compareFn); + } + + if (rightPartStart < end) { + tQSortEx(pMeterSids, size, rightPartStart, end, pOrderDesc, compareFn); + } +} + +int32_t *calculateSubGroup(void **pSids, int32_t numOfMeters, int32_t *numOfSubset, tOrderDescriptor *pOrderDesc, + __ext_compar_fn_t compareFn) { + int32_t *starterPos = (int32_t *)malloc((numOfMeters + 1) * sizeof(int32_t)); // add additional buffer + starterPos[0] = 0; + + *numOfSubset = 1; + + for (int32_t i = 1; i < numOfMeters; ++i) { + int32_t ret = compareFn(pSids[i - 1], pSids[i], pOrderDesc); + if (ret != 0) { + assert(ret == -1); + starterPos[(*numOfSubset)++] = i; + } + } + + starterPos[*numOfSubset] = numOfMeters; + assert(*numOfSubset <= numOfMeters); + + return starterPos; +} + +tTagSchema *tCreateTagSchema(SSchema *pSchema, int32_t numOfTagCols) { + if (numOfTagCols == 0 || pSchema == NULL) { + return NULL; + } + + tTagSchema *pTagSchema = + (tTagSchema *)calloc(1, sizeof(tTagSchema) + numOfTagCols * sizeof(int32_t) + sizeof(SSchema) * numOfTagCols); + 
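+  // Layout of the single calloc block (observed from the code below): the tTagSchema header, then
+  // numOfTagCols int32_t column offsets (colOffset), then a copy of the SSchema array that pSchema
+  // is pointed at; note the calloc result is used without a NULL check.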
+ pTagSchema->colOffset[0] = 0; + pTagSchema->numOfCols = numOfTagCols; + for (int32_t i = 1; i < numOfTagCols; ++i) { + pTagSchema->colOffset[i] = (pTagSchema->colOffset[i - 1] + pSchema[i - 1].bytes); + } + + pTagSchema->pSchema = (SSchema *)&(pTagSchema->colOffset[numOfTagCols]); + memcpy(pTagSchema->pSchema, pSchema, sizeof(SSchema) * numOfTagCols); + return pTagSchema; +} + +tSidSet *tSidSetCreate(struct SMeterSidExtInfo **pMeterSidExtInfo, int32_t numOfMeters, SSchema *pSchema, + int32_t numOfTags, int16_t *orderList, int32_t numOfOrderCols) { + tSidSet *pSidSet = (tSidSet *)calloc(1, sizeof(tSidSet) + numOfOrderCols * sizeof(int16_t)); + if (pSidSet == NULL) { + return NULL; + } + + pSidSet->numOfSids = numOfMeters; + pSidSet->pSids = pMeterSidExtInfo; + pSidSet->pTagSchema = tCreateTagSchema(pSchema, numOfTags); + pSidSet->orderIdx.numOfOrderedCols = numOfOrderCols; + + memcpy(pSidSet->orderIdx.pData, orderList, numOfOrderCols * sizeof(int16_t)); + + pSidSet->starterPos = NULL; + return pSidSet; +} + +void tSidSetDestroy(tSidSet **pSets) { + if ((*pSets) != NULL) { + tfree((*pSets)->starterPos); + tfree((*pSets)->pTagSchema)(*pSets)->pSids = NULL; + tfree(*pSets); + } +} + +void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *pOrder) { + printf("sid: %-5d tags(", pMeterInfo->sid); + + for (int32_t i = 0; i < pOrder->numOfOrderedCols; ++i) { + int32_t tagIdx = pOrder->pData[i]; + + if (tagIdx == -1) { + /* it is the tbname column */ + printf("%s, ", pMeterInfo->tags); + continue; + } + + switch (pSchema->pSchema[tagIdx].type) { + case TSDB_DATA_TYPE_INT: + printf("%d, ", GET_TAG_VAL(pMeterInfo, tagIdx, pSchema, int32_t)); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf("%lf, ", GET_TAG_VAL(pMeterInfo, tagIdx, pSchema, double)); + break; + case TSDB_DATA_TYPE_FLOAT: + printf("%f, ", GET_TAG_VAL(pMeterInfo, tagIdx, pSchema, float)); + break; + case TSDB_DATA_TYPE_BIGINT: + printf("%ld, ", GET_TAG_VAL(pMeterInfo, tagIdx, pSchema, int64_t)); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf("%d, ", GET_TAG_VAL(pMeterInfo, tagIdx, pSchema, int16_t)); + break; + case TSDB_DATA_TYPE_TINYINT: + printf("%d, ", GET_TAG_VAL(pMeterInfo, tagIdx, pSchema, int8_t)); + break; + case TSDB_DATA_TYPE_BINARY: + printf("%s, ", GET_TAG_VAL_POINTER(pMeterInfo, tagIdx, pSchema, char)); + break; + case TSDB_DATA_TYPE_NCHAR: { + char *data = GET_TAG_VAL_POINTER(pMeterInfo, tagIdx, pSchema, char); + char buffer[512] = {0}; + + taosUcs4ToMbs(data, pSchema->pSchema[tagIdx].bytes, buffer); + printf("%s, ", buffer); + break; + } + case TSDB_DATA_TYPE_BOOL: + printf("%d, ", GET_TAG_VAL(pMeterInfo, tagIdx, pSchema, int8_t)); + break; + + default: + assert(false); + } + } + printf(")\n"); +} + +/* + * display all the subset groups for debug purpose only + */ +static void UNUSED_FUNC tSidSetDisplay(tSidSet *pSets) { + printf("%d meters.\n", pSets->numOfSids); + for (int32_t i = 0; i < pSets->numOfSids; ++i) { + printf("%d\t", pSets->pSids[i]->sid); + } + printf("\n"); + + printf("total number of subset group is: %d\n", pSets->numOfSubSet); + for (int32_t i = 0; i < pSets->numOfSubSet; ++i) { + int32_t s = pSets->starterPos[i]; + int32_t e = pSets->starterPos[i + 1]; + printf("the %d-th subgroup: \n", i + 1); + for (int32_t j = s; j < e; ++j) { + tTagsPrints(pSets->pSids[j], pSets->pTagSchema, &pSets->orderIdx); + } + } +} + +void tSidSetSort(tSidSet *pSets) { + pTrace("number of meters in sort: %d", pSets->numOfSids); + tOrderIdx *pOrderIdx = &pSets->orderIdx; + + if 
(pOrderIdx->numOfOrderedCols == 0 || pSets->numOfSids <= 1) { + // no group by clause + pSets->numOfSubSet = 1; + pSets->starterPos = (int32_t *)malloc(sizeof(int32_t) * (pSets->numOfSubSet + 1)); + pSets->starterPos[0] = 0; + pSets->starterPos[1] = pSets->numOfSids; + pTrace("all meters belong to one subgroup, no need to subgrouping ops."); +#ifdef _DEBUG_VIEW + tSidSetDisplay(pSets); +#endif + } else { + tOrderDescriptor *descriptor = + (tOrderDescriptor *)calloc(1, sizeof(tOrderDescriptor) + sizeof(int16_t) * pSets->orderIdx.numOfOrderedCols); + descriptor->pTagSchema = pSets->pTagSchema; + descriptor->orderIdx = pSets->orderIdx; + + memcpy(descriptor->orderIdx.pData, pOrderIdx->pData, sizeof(int16_t) * pSets->orderIdx.numOfOrderedCols); + + tQSortEx((void **)pSets->pSids, POINTER_BYTES, 0, pSets->numOfSids - 1, descriptor, meterSidComparator); + pSets->starterPos = + calculateSubGroup((void **)pSets->pSids, pSets->numOfSids, &pSets->numOfSubSet, descriptor, meterSidComparator); + +#ifdef _DEBUG_VIEW + tSidSetDisplay(pSets); +#endif + tfree(descriptor); + } +} diff --git a/src/system/src/vnodeQueryImpl.c b/src/system/src/vnodeQueryImpl.c new file mode 100644 index 000000000000..a5d1d55f8d12 --- /dev/null +++ b/src/system/src/vnodeQueryImpl.c @@ -0,0 +1,6156 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include "taosmsg.h" +#include "textbuffer.h" +#include "tinterpolation.h" +#include "tscSecondaryMerge.h" +#include "tscompression.h" +#include "ttime.h" +#include "vnode.h" +#include "vnodeRead.h" +#include "vnodeUtil.h" + +#include "vnodeCache.h" +#include "vnodeDataFilterFunc.h" +#include "vnodeFile.h" +#include "vnodeQueryImpl.h" + +static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, + int32_t size); +static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, + int32_t size); + +__read_data_fn_t readDataFunctor[2] = { + copyDataFromMMapBuffer, readDataFromDiskFile, +}; + +#define IS_DISK_DATA_BLOCK(q) ((q)->fileId >= 0) + +static int64_t comp_block_info_read_bytes = 0; + +static void destroyMeterQueryInfo(SMeterQueryInfo *pMeterQInfo); +static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo); +static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn, + bool loadData); +static int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, + SQueryRuntimeEnv *pRuntimeEnv, SMeterDataInfo *pMeterHeadDataInfo, + int32_t start, int32_t end); + +static TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index); +static TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index); + +static void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, int32_t pos); +static int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t step); + +static void setOutputBuffer(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pResult); + +static void getAlignedIntervalQueryRange(SQuery *pQuery, TSKEY keyInData, TSKEY skey, TSKEY ekey); +static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pInfo, + SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields, + __block_search_fn_t searchFn); + +static void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *sqinfo, int32_t numOfResult); +static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data, + int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus, + SField *pFields, __block_search_fn_t searchFn); + +static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx); +static void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, + const SQueryRuntimeEnv *pRuntimeEnv); +static void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes); +static void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid); +static void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn); + +// check the offset value integrity +static FORCE_INLINE int32_t validateHeaderOffsetSegment(SQInfo *pQInfo, char *filePath, int32_t vid, char *data, + int32_t size) { + if (!taosCheckChecksumWhole((uint8_t *)data + TSDB_FILE_HEADER_LEN, size)) { + dLError("QInfo:%p vid:%d, failed to read header file:%s, file offset area is broken", pQInfo, vid, filePath); + return -1; + } + return 0; +} + +static FORCE_INLINE int32_t getCompHeaderSegSize(SVnodeCfg *pCfg) { + return pCfg->maxSessions * sizeof(SCompHeader) + sizeof(TSCKSUM); +} 
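+// The per-meter SCompHeader index segment sits directly after the fixed-length file header, so the
+// first valid compInfoOffset is TSDB_FILE_HEADER_LEN plus the checksummed segment size computed
+// above (maxSessions * sizeof(SCompHeader) + sizeof(TSCKSUM)).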
+ +static FORCE_INLINE int32_t getCompHeaderStartPosition(SVnodeCfg *pCfg) { + return TSDB_FILE_HEADER_LEN + getCompHeaderSegSize(pCfg); +} + +static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *pMeterObj, SCompHeader *pCompHeader, + SQueryFileInfo *pQueryFileInfo, int32_t headerSize) { + if (pCompHeader->compInfoOffset < headerSize || pCompHeader->compInfoOffset > pQueryFileInfo->headFileSize) { + dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid, size:%ld", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pCompHeader->compInfoOffset, + pQueryFileInfo->headFileSize); + + return -1; + } + + return 0; +} + +// check compinfo integrity +static FORCE_INLINE int32_t validateCompBlockInfoSegment(SQInfo *pQInfo, char *filePath, int32_t vid, + SCompInfo *compInfo, int64_t offset) { + if (!taosCheckChecksumWhole((uint8_t *)compInfo, sizeof(SCompInfo))) { + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%lld", pQInfo, vid, filePath, + offset); + return -1; + } + return 0; +} + +static FORCE_INLINE int32_t validateCompBlockSegment(SQInfo *pQInfo, char *filePath, SCompInfo *compInfo, char *pBlock, + int32_t vid, TSCKSUM checksum) { + uint32_t size = compInfo->numOfBlocks * sizeof(SCompBlock); + + if (checksum != taosCalcChecksum(0, (uint8_t *)pBlock, size)) { + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compblock is broken:%ld", pQInfo, vid, filePath, + (char *)compInfo + sizeof(SCompInfo)); + return -1; + } + + return 0; +} + +static void vnodeFreeFieldsEx(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + vnodeFreeFields(pQuery); + + vnodeInitLoadCompBlockInfo(&pRuntimeEnv->loadCompBlockInfo); +} + +static bool vnodeIsCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + /* + * check if data file header of this table has been loaded into memory, avoid to reloaded comp Block info + */ + SQueryLoadCompBlockInfo *pLoadCompBlockInfo = &pRuntimeEnv->loadCompBlockInfo; + + // if vnodeFreeFields is called, the pQuery->pFields is NULL + if (pLoadCompBlockInfo->fileListIndex == fileIndex && pLoadCompBlockInfo->sid == pMeterObj->sid && + pQuery->pFields != NULL && pQuery->fileId > 0) { + assert(pRuntimeEnv->pHeaderFiles[fileIndex].fileID == pLoadCompBlockInfo->fileId && pQuery->numOfBlocks > 0); + return true; + } + + return false; +} + +static void vnodeSetCompBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex, int32_t sid) { + SQueryLoadCompBlockInfo *pLoadCompBlockInfo = &pRuntimeEnv->loadCompBlockInfo; + + pLoadCompBlockInfo->sid = sid; + pLoadCompBlockInfo->fileListIndex = fileIndex; + pLoadCompBlockInfo->fileId = pRuntimeEnv->pHeaderFiles[fileIndex].fileID; +} + +static void vnodeInitLoadCompBlockInfo(SQueryLoadCompBlockInfo *pCompBlockLoadInfo) { + pCompBlockLoadInfo->sid = -1; + pCompBlockLoadInfo->fileId = -1; + pCompBlockLoadInfo->fileListIndex = -1; +} + +static bool vnodeIsDatablockLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; + + /* this block has been loaded into memory, return directly */ + if (pLoadInfo->fileId == pQuery->fileId && pLoadInfo->slotIdx == pQuery->slot && pQuery->slot != -1 && + pLoadInfo->sid == pMeterObj->sid) { + assert(fileIndex == pLoadInfo->fileListIndex); + return true; + } + + return false; +} + 
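+// Record which (fileId, slot, sid) the in-memory data block came from, so that
+// vnodeIsDatablockLoaded above can detect it and skip reloading the same block from disk.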
+static void vnodeSetDataBlockInfoLoaded(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t fileIndex) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; + + pLoadInfo->fileId = pQuery->fileId; + pLoadInfo->slotIdx = pQuery->slot; + pLoadInfo->fileListIndex = fileIndex; + pLoadInfo->sid = pMeterObj->sid; +} + +static void vnodeInitDataBlockInfo(SQueryLoadBlockInfo *pBlockLoadInfo) { + pBlockLoadInfo->slotIdx = -1; + pBlockLoadInfo->fileId = -1; + pBlockLoadInfo->sid = -1; + pBlockLoadInfo->fileListIndex = -1; +} + +/* + * read comp block info from header file + * + */ +static int vnodeGetCompBlockInfo(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex) { + SQuery *pQuery = pRuntimeEnv->pQuery; + SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + + SVnodeCfg * pCfg = &vnodeList[pMeterObj->vnode].cfg; + SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIndex]; + int32_t fd = pQueryFileInfo->headerFd; + + int64_t st = taosGetTimestampUs(); + + if (vnodeIsCompBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIndex)) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d compBlock info is loaded, not reload", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQueryFileInfo->fileID); + + return pQuery->numOfBlocks; + } + + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + pSummary->readCompInfo++; + pSummary->numOfSeek++; + +#if 1 + char *data = pRuntimeEnv->pHeaderFiles[fileIndex].pHeaderFileData; + UNUSED(fd); +#else + char *data = calloc(1, tmsize + TSDB_FILE_HEADER_LEN); + read(fd, data, tmsize + TSDB_FILE_HEADER_LEN); +#endif + + // check the offset value integrity + if (validateHeaderOffsetSegment(pQInfo, pQueryFileInfo->headerFilePath, pMeterObj->vnode, data, + getCompHeaderSegSize(pCfg)) < 0) { + return -1; + } + + int64_t offset = TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pMeterObj->sid; + SCompHeader *compHeader = (SCompHeader *)(data + offset); + + // no data in this file for specified meter, abort + if (compHeader->compInfoOffset == 0) { + return 0; + } + + // corrupted file may cause the invalid compInfoOffset, check needs + if (validateCompBlockOffset(pQInfo, pMeterObj, compHeader, pQueryFileInfo, getCompHeaderStartPosition(pCfg)) < 0) { + return -1; + } + +#if 1 + SCompInfo *compInfo = (SCompInfo *)(data + compHeader->compInfoOffset); +#else + lseek(fd, compHeader->compInfoOffset, SEEK_SET); + SCompInfo CompInfo = {0}; + SCompInfo *compInfo = &CompInfo; + read(fd, compInfo, sizeof(SCompInfo)); +#endif + + // check compblock info integrity + if (validateCompBlockInfoSegment(pQInfo, pQueryFileInfo->headerFilePath, pMeterObj->vnode, compInfo, + compHeader->compInfoOffset) < 0) { + return -1; + } + + if (compInfo->numOfBlocks <= 0 || compInfo->uid != pMeterObj->uid) { + return 0; + } + + // free allocated SField data + vnodeFreeFieldsEx(pRuntimeEnv); + pQuery->numOfBlocks = (int32_t)compInfo->numOfBlocks; + + int32_t compBlockSize = compInfo->numOfBlocks * sizeof(SCompBlock); + size_t bufferSize = compBlockSize + POINTER_BYTES * compInfo->numOfBlocks; + + // prepare buffer to hold compblock data + if (pQuery->blockBufferSize != bufferSize) { + pQuery->pBlock = realloc(pQuery->pBlock, bufferSize); + pQuery->blockBufferSize = (int32_t)bufferSize; + } + + memset(pQuery->pBlock, 0, (size_t)pQuery->blockBufferSize); + +#if 1 + memcpy(pQuery->pBlock, (char *)compInfo + sizeof(SCompInfo), (size_t)compBlockSize); + TSCKSUM checksum = *(TSCKSUM *)((char 
*)compInfo + sizeof(SCompInfo) + compBlockSize); +#else + TSCKSUM checksum; + read(fd, pQuery->pBlock, compBlockSize); + read(fd, &checksum, sizeof(TSCKSUM)); +#endif + + // check comp block integrity + if (validateCompBlockSegment(pQInfo, pQueryFileInfo->headerFilePath, compInfo, (char *)pQuery->pBlock, + pMeterObj->vnode, checksum) < 0) { + return -1; + } + + pQuery->pFields = (SField **)((char *)pQuery->pBlock + compBlockSize); + vnodeSetCompBlockInfoLoaded(pRuntimeEnv, fileIndex, pMeterObj->sid); + + int64_t et = taosGetTimestampUs(); + qTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, load compblock info, size:%d, elapsed:%f ms", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pRuntimeEnv->pHeaderFiles[fileIndex].fileID, + compBlockSize, (et - st) / 1000.0); + + pSummary->totalCompInfoSize += compBlockSize; + pSummary->loadCompInfoUs += (et - st); + + return pQuery->numOfBlocks; +} + +bool doRevisedResultsByLimit(SQInfo *pQInfo) { + SQuery *pQuery = &pQInfo->query; + + if ((pQuery->limit.limit > 0) && (pQuery->pointsRead + pQInfo->pointsRead > pQuery->limit.limit)) { + pQuery->pointsRead = pQuery->limit.limit - pQInfo->pointsRead; + + setQueryStatus(pQuery, QUERY_COMPLETED); // query completed + return true; + } + + return false; +} + +static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t StartQueryTimestamp, void *inputData, + char *primaryColumnData, int32_t size, int32_t functionId, SField *pField, bool hasNull, + int32_t blockStatus, void *param, int32_t scanFlag); + +static tFilePage **createInMemGroupResultBuf(SQLFunctionCtx *pCtx, int32_t nOutputCols, int32_t nAlloc); +static void destroyBuf(tFilePage **pBuf, int32_t nOutputCols); + +static int32_t binarySearchForBlockImpl(SCompBlock *pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) { + int32_t firstSlot = 0; + int32_t lastSlot = numOfBlocks - 1; + + int32_t midSlot = firstSlot; + + while (1) { + numOfBlocks = lastSlot - firstSlot + 1; + midSlot = (firstSlot + (numOfBlocks >> 1)); + + if (numOfBlocks == 1) break; + + if (skey > pBlock[midSlot].keyLast) { + if (numOfBlocks == 2) break; + if ((order == TSQL_SO_DESC) && (skey < pBlock[midSlot + 1].keyFirst)) break; + firstSlot = midSlot + 1; + } else if (skey < pBlock[midSlot].keyFirst) { + if ((order == TSQL_SO_ASC) && (skey > pBlock[midSlot - 1].keyLast)) break; + lastSlot = midSlot - 1; + } else { + break; // got the slot + } + } + + return midSlot; +} + +static int32_t binarySearchForBlock(SQuery *pQuery, int64_t key) { + return binarySearchForBlockImpl(pQuery->pBlock, pQuery->numOfBlocks, key, pQuery->order.order); +} + +static UNUSED_FUNC int32_t resetMMapWindow(SQueryFileInfo *pQueryFileInfo) { + /* unmap previous buffer */ + munmap(pQueryFileInfo->pDataFileData, pQueryFileInfo->defaultMappingSize); + + pQueryFileInfo->dtFileMappingOffset = 0; + pQueryFileInfo->pDataFileData = mmap(NULL, pQueryFileInfo->defaultMappingSize, PROT_READ, MAP_PRIVATE | MAP_POPULATE, + pQueryFileInfo->dataFd, pQueryFileInfo->dtFileMappingOffset); + if (pQueryFileInfo->pDataFileData == MAP_FAILED) { + dError("failed to mmaping data file:%s, reason:%s", pQueryFileInfo->dataFilePath, strerror(errno)); + return -1; + } + + return 0; +} + +static int32_t moveMMapWindow(SQueryFileInfo *pQueryFileInfo, uint64_t offset) { + uint64_t upperBnd = (pQueryFileInfo->dtFileMappingOffset + pQueryFileInfo->defaultMappingSize - 1); + + /* data that are located in current mmapping window */ + if ((offset >= pQueryFileInfo->dtFileMappingOffset && offset <= upperBnd) && + 
pQueryFileInfo->pDataFileData != MAP_FAILED) { + // if it mapping failed, try again when it is called. + return 0; + } + + /* + * 1. there is import data that locate farther from the beginning, but with less timestamp, so we need to move the + * window backwards + * 2. otherwise, move the mmaping window forward + */ + upperBnd = (offset / pQueryFileInfo->defaultMappingSize + 1) * pQueryFileInfo->defaultMappingSize - 1; + + /* unmap previous buffer */ + if (pQueryFileInfo->pDataFileData != MAP_FAILED) { + int32_t ret = munmap(pQueryFileInfo->pDataFileData, pQueryFileInfo->defaultMappingSize); + pQueryFileInfo->pDataFileData = MAP_FAILED; + if (ret != 0) { + dError("failed to unmmaping data file:%s, handle:%d, offset:%ld, reason:%s", pQueryFileInfo->dataFilePath, + pQueryFileInfo->dataFd, pQueryFileInfo->dtFileMappingOffset, strerror(errno)); + return -1; + } + } + + /* mmap from the new position */ + pQueryFileInfo->dtFileMappingOffset = upperBnd - pQueryFileInfo->defaultMappingSize + 1; + pQueryFileInfo->pDataFileData = mmap(NULL, pQueryFileInfo->defaultMappingSize, PROT_READ, MAP_PRIVATE | MAP_POPULATE, + pQueryFileInfo->dataFd, pQueryFileInfo->dtFileMappingOffset); + if (pQueryFileInfo->pDataFileData == MAP_FAILED) { + dError("failed to mmaping data file:%s, handle:%d, offset:%ld, reason:%s", pQueryFileInfo->dataFilePath, + pQueryFileInfo->dataFd, pQueryFileInfo->dtFileMappingOffset, strerror(errno)); + return -1; + } + + /* advise kernel the usage of mmaped data */ + if (madvise(pQueryFileInfo->pDataFileData, pQueryFileInfo->defaultMappingSize, MADV_SEQUENTIAL) == -1) { + dError("failed to advise kernel the usage of data file:%s, handle:%d, reason:%s", + pQueryFileInfo->dataFilePath, pQueryFileInfo->dataFd, strerror(errno)); + } + + return 0; +} + +static int32_t copyDataFromMMapBuffer(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, + int32_t size) { + assert(size >= 0); + + int32_t ret = moveMMapWindow(pQueryFile, offset); + dTrace("QInfo:%p finished move to correct position:%ld", pQInfo, taosGetTimestampUs()); + + if (pQueryFile->pDataFileData == MAP_FAILED || ret != TSDB_CODE_SUCCESS) { + dTrace("QInfo:%p move window failed. ret:%d", pQInfo, ret); + return -1; + } + + uint64_t upperBnd = pQueryFile->dtFileMappingOffset + pQueryFile->defaultMappingSize - 1; + + /* data are enclosed in current mmap window */ + if (offset + size <= upperBnd) { + uint64_t startPos = offset - pQueryFile->dtFileMappingOffset; + memcpy(buf, pQueryFile->pDataFileData + startPos, size); + + dTrace("QInfo:%p copy data completed, size:%d, time:%ld", pQInfo, size, taosGetTimestampUs()); + + } else { + uint32_t firstPart = upperBnd - offset + 1; + memcpy(buf, pQueryFile->pDataFileData + (offset - pQueryFile->dtFileMappingOffset), firstPart); + + dTrace("QInfo:%p copy data first part,size:%d, time:%ld", pQInfo, firstPart, taosGetTimestampUs()); + + char *dst = buf + firstPart; + + /* remain data */ + uint32_t remain = size - firstPart; + while (remain > 0) { + int32_t ret1 = moveMMapWindow(pQueryFile, pQueryFile->dtFileMappingOffset + pQueryFile->defaultMappingSize); + if (ret1 != 0) { + return ret1; + } + + uint32_t len = (remain > pQueryFile->defaultMappingSize) ? 
pQueryFile->defaultMappingSize : remain; + + /* start from the 0 position */ + memcpy(dst, pQueryFile->pDataFileData, len); + remain -= len; + dst += len; + + dTrace("QInfo:%p copy data part,size:%d, time:%ld", pQInfo, len, taosGetTimestampUs()); + } + } + + return 0; +} + +static int32_t readDataFromDiskFile(int fd, SQInfo *pQInfo, SQueryFileInfo *pQueryFile, char *buf, uint64_t offset, + int32_t size) { + assert(size >= 0); + + int32_t ret = (int32_t)lseek(fd, offset, SEEK_SET); + if (ret == -1) { + // qTrace("QInfo:%p seek failed, reason:%s", pQInfo, strerror(errno)); + return -1; + } + + ret = read(fd, buf, size); + // qTrace("QInfo:%p read data %d completed", pQInfo, size); + return 0; +} + +static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFileInfo *pQueryFileInfo, SCompBlock *pBlock, SField *pFields, + int32_t col, SData *sdata, void *tmpBuf, char *buffer, int32_t buffersize) { + char *dst = (pBlock->algorithm) ? tmpBuf : sdata->data; + + int64_t offset = pBlock->offset + pFields[col].offset; + SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + + int fd = pBlock->last ? pQueryFileInfo->lastFd : pQueryFileInfo->dataFd; + int32_t ret = (*readDataFunctor[DEFAULT_IO_ENGINE])(fd, pQInfo, pQueryFileInfo, dst, offset, pFields[col].len); + if (ret != 0) { + return ret; + } + + // load checksum + TSCKSUM checksum = 0; + ret = (*readDataFunctor[DEFAULT_IO_ENGINE])(fd, pQInfo, pQueryFileInfo, (char *)&checksum, offset + pFields[col].len, + sizeof(TSCKSUM)); + if (ret != 0) { + return ret; + } + + // check column data integrity + if (checksum != taosCalcChecksum(0, (const uint8_t *)dst, pFields[col].len)) { + dLError("QInfo:%p, column data checksum error, file:%s, col: %d, offset:%ld", GET_QINFO_ADDR(pQuery), + pQueryFileInfo->dataFilePath, col, offset); + + return -1; + } + + if (pBlock->algorithm) { + (*pDecompFunc[pFields[col].type])(tmpBuf, pFields[col].len, pBlock->numOfPoints, sdata->data, + pFields[col].bytes*pBlock->numOfPoints, pBlock->algorithm, buffer, buffersize); + } + + return 0; +} + +static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SQueryFileInfo *pQueryFileInfo, + SCompBlock *pBlock, SField **pField) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + + size_t size = sizeof(SField) * (pBlock->numOfCols) + sizeof(TSCKSUM); + + // if *pField != NULL, this block is loaded once, in current query do nothing + if (*pField == NULL) { // load the fields information once + *pField = malloc(size); + } + + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + pSummary->totalFieldSize += size; + pSummary->readField++; + pSummary->numOfSeek++; + + int64_t st = taosGetTimestampUs(); + + int fd = pBlock->last ? 
pQueryFileInfo->lastFd : pQueryFileInfo->dataFd; + int32_t ret = + (*readDataFunctor[DEFAULT_IO_ENGINE])(fd, pQInfo, pQueryFileInfo, (char *)(*pField), pBlock->offset, size); + if (ret != 0) { + return ret; + } + + // check fields integrity + if (!taosCheckChecksumWhole((uint8_t *)(*pField), size)) { + dLError("QInfo:%p vid:%d sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%lld", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pQueryFileInfo->dataFilePath, + pBlock->offset); + return -1; + } + + int64_t et = taosGetTimestampUs(); + qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, load field info, size:%d, elapsed:%f ms", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, size, (et - st) / 1000.0); + + pSummary->loadFieldUs += (et - st); + return 0; +} + +static void fillWithNull(SQuery *pQuery, char *dst, int32_t col, int32_t numOfPoints) { + int32_t bytes = pQuery->colList[col].data.bytes; + int32_t type = pQuery->colList[col].data.type; + + setNullN(dst, type, bytes, numOfPoints); +} + +static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIdx, + bool loadPrimaryCol, bool loadSField) { + int32_t i = 0, j = 0; + + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + SData ** sdata = pRuntimeEnv->colDataBuffer; + + SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIdx]; + SData ** primaryTSBuf = &pRuntimeEnv->primaryColBuffer; + void * tmpBuf = pRuntimeEnv->unzipBuffer; + + if (vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIdx)) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, data block has been loaded, ts:%d, slot:%d, brange:%lld-%lld, rows:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, loadPrimaryCol, pQuery->slot, + pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); + + return 0; + } + + /* failed to load fields info, return with error info */ + if (loadSField && (loadDataBlockFieldsInfo(pRuntimeEnv, pQueryFileInfo, pBlock, pField) != 0)) { + return -1; + } + + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + int32_t columnBytes = 0; + + int64_t st = taosGetTimestampUs(); + + if (loadPrimaryCol) { + if (PRIMARY_TSCOL_LOADED(pQuery)) { + *primaryTSBuf = sdata[0]; + } else { + columnBytes += (*pField)[PRIMARYKEY_TIMESTAMP_COL_INDEX].len + sizeof(TSCKSUM); + int32_t ret = loadColumnIntoMem(pQuery, pQueryFileInfo, pBlock, *pField, PRIMARYKEY_TIMESTAMP_COL_INDEX, + *primaryTSBuf, tmpBuf, pRuntimeEnv->secondaryUnzipBuffer, + pRuntimeEnv->internalBufSize); + if (ret != 0) { + return -1; + } + + pSummary->numOfSeek++; + j += 1; // first column of timestamp is not needed to be read again + } + } + + int32_t ret = 0; + + /* the first round always be 1, the secondary round is determined by queried + * function */ + int32_t round = pRuntimeEnv->scanFlag; + + while (j < pBlock->numOfCols && i < pQuery->numOfCols) { + if ((*pField)[j].colId < pQuery->colList[i].data.colId) { + ++j; + } else if ((*pField)[j].colId == pQuery->colList[i].data.colId) { + /* + * during supplementary scan: + * 1. primary ts column (always loaded) + * 2. query specified columns + * 3. in case of filter column required, filter columns must be loaded. 
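+       *    Columns whose values are entirely null in this block (numOfNullPoints == numOfPoints in the
+       *    corresponding SField) are filled with null locally below instead of being read from disk.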
+ */ + if (pQuery->colList[i].req[round] == 1 || pQuery->colList[i].data.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + /* if data of this column in current block are all null, do NOT read it from disk */ + if ((*pField)[j].numOfNullPoints == pBlock->numOfPoints) { + fillWithNull(pQuery, sdata[i]->data, i, pBlock->numOfPoints); + } else { + columnBytes += (*pField)[j].len + sizeof(TSCKSUM); + ret = loadColumnIntoMem(pQuery, pQueryFileInfo, pBlock, *pField, j, sdata[i], tmpBuf, + pRuntimeEnv->secondaryUnzipBuffer, pRuntimeEnv->internalBufSize); + + pSummary->numOfSeek++; + } + } + ++i; + ++j; + } else { + /* + * pQuery->colList[i].colIdx < (*pFields)[j].colId this column is not existed in current block, fill with NULL value + */ + fillWithNull(pQuery, sdata[i]->data, i, pBlock->numOfPoints); + + pSummary->totalGenData += (pBlock->numOfPoints * pQuery->colList[i].data.bytes); + ++i; + } + } + + if (j >= pBlock->numOfCols && i < pQuery->numOfCols) { + // remain columns need to set null value + while (i < pQuery->numOfCols) { + fillWithNull(pQuery, sdata[i]->data, i, pBlock->numOfPoints); + + pSummary->totalGenData += (pBlock->numOfPoints * pQuery->colList[i].data.bytes); + ++i; + } + } + + int64_t et = taosGetTimestampUs(); + qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, load block completed, ts loaded:%d, rec:%d, elapsed:%f ms", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, loadPrimaryCol, + pBlock->numOfPoints, (et - st) / 1000.0); + + pSummary->totalBlockSize += columnBytes; + pSummary->loadBlocksUs += (et - st); + pSummary->readDiskBlocks++; + + vnodeSetDataBlockInfoLoaded(pRuntimeEnv, pMeterObj, fileIdx); + return ret; +} + +// todo ignore the blockType, pass the pQuery into this function +SBlockInfo getBlockBasicInfo(void *pBlock, int32_t blockType) { + SBlockInfo blockInfo = {0}; + if (IS_FILE_BLOCK(blockType)) { + SCompBlock *pDiskBlock = (SCompBlock *)pBlock; + + blockInfo.keyFirst = pDiskBlock->keyFirst; + blockInfo.keyLast = pDiskBlock->keyLast; + blockInfo.size = pDiskBlock->numOfPoints; + blockInfo.numOfCols = pDiskBlock->numOfCols; + } else { + SCacheBlock *pCacheBlock = (SCacheBlock *)pBlock; + + blockInfo.keyFirst = getTimestampInCacheBlock(pCacheBlock, 0); + blockInfo.keyLast = getTimestampInCacheBlock(pCacheBlock, pCacheBlock->numOfPoints - 1); + blockInfo.size = pCacheBlock->numOfPoints; + blockInfo.numOfCols = pCacheBlock->pMeterObj->numOfColumns; + } + + return blockInfo; +} + +static bool checkQueryRangeAgainstNextBlock(SBlockInfo *pBlockInfo, SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + if ((QUERY_IS_ASC_QUERY(pQuery) && pBlockInfo->keyFirst > pQuery->ekey) || + (!QUERY_IS_ASC_QUERY(pQuery) && pBlockInfo->keyLast < pQuery->ekey)) { + int32_t pos = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : pBlockInfo->size - 1; + + savePointPosition(&pRuntimeEnv->nextPos, pQuery->fileId, pQuery->slot, pos); + setQueryStatus(pQuery, QUERY_COMPLETED); + + return false; + } + + return true; +} + +/** + * + * @param pQuery + * @param pBlockInfo + * @param forwardStep + * @return TRUE means query not completed, FALSE means query is completed + */ +static bool queryCompleteInBlock(SQuery *pQuery, SBlockInfo *pBlockInfo, int32_t forwardStep) { + if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) { + assert(pQuery->checkBufferInLoop == 1 && pQuery->over == QUERY_RESBUF_FULL && pQuery->pointsOffset == 0); + + assert((QUERY_IS_ASC_QUERY(pQuery) && forwardStep + pQuery->pos <= pBlockInfo->size) || + (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->pos - forwardStep + 1 >= 0)); + + // current query completed + if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } + + return true; + } else { // query completed + if ((pQuery->ekey <= pBlockInfo->keyLast && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->ekey >= pBlockInfo->keyFirst && !QUERY_IS_ASC_QUERY(pQuery))) { + setQueryStatus(pQuery, QUERY_COMPLETED); + return true; + } + + return false; + } +} + +/** + * save triple tuple of (fileId, slot, pos) to SPositionInfo + */ +void savePointPosition(SPositionInfo *position, int32_t fileId, int32_t slot, int32_t pos) { + /* + * slot == -1 && pos == -1 means no data left anymore + */ + assert(fileId >= -1 && slot >= -1 && pos >= -1); + + position->fileId = fileId; + position->slot = slot; + position->pos = pos; +} + +static FORCE_INLINE void saveNextAccessPositionInCache(SPositionInfo *position, int32_t slotIdx, int32_t pos) { + savePointPosition(position, -1, slotIdx, pos); +} + +// todo all functions that call this function should check the returned data +// blocks status +SCacheBlock *getCacheDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t slot) { + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + if (pCacheInfo == NULL || pCacheInfo->cacheBlocks == NULL || slot < 0) { + return NULL; + } + + assert(slot < pCacheInfo->maxBlocks); + + SCacheBlock *pBlock = pCacheInfo->cacheBlocks[slot]; + if (pBlock == NULL) { + dError("QInfo:%p NULL Block In Cache, available block:%d, last block:%d, accessed null block:%d, pBlockId:%d", + GET_QINFO_ADDR(pQuery), pCacheInfo->numOfBlocks, pCacheInfo->currentSlot, slot, pQuery->blockId); + return NULL; + } + + if (pMeterObj != pBlock->pMeterObj || pBlock->blockId > pQuery->blockId || pBlock->numOfPoints <= 0) { + dWarn("QInfo:%p vid:%d sid:%d id:%s, cache block is overwritten, slot:%d blockId:%d qBlockId:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->blockId, + pQuery->blockId); + return NULL; + } + + return pBlock; +} + +static SCompBlock *getDiskDataBlock(SQuery *pQuery, int32_t slot) { + assert(pQuery->fileId >= 0 && slot >= 0 && slot < pQuery->numOfBlocks && pQuery->pBlock != NULL); + return &pQuery->pBlock[slot]; +} + +static void *getGenericDataBlock(SMeterObj *pMeterObj, SQuery *pQuery, int32_t slot) { + if (IS_DISK_DATA_BLOCK(pQuery)) { + return getDiskDataBlock(pQuery, slot); + } else { + return getCacheDataBlock(pMeterObj, pQuery, slot); + } +} + +static int32_t getFileIdFromKey(int32_t vid, TSKEY key) { + SVnodeObj *pVnode = &vnodeList[vid]; + int64_t delta = (int64_t)pVnode->cfg.daysPerFile * tsMsPerDay[pVnode->cfg.precision]; + + return (int32_t)(key / delta); // 
set the starting fileId +} + +enum { + QUERY_RANGE_LESS_EQUAL = 0, + QUERY_RANGE_GREATER_EQUAL = 1, +}; + +static bool getQualifiedDataBlock(SMeterObj *pMeterObj, SQueryRuntimeEnv *pRuntimeEnv, int32_t type, + __block_search_fn_t searchFn) { + int32_t blkIdx = -1; + int32_t fid = -1; + int32_t step = (type == QUERY_RANGE_GREATER_EQUAL) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; + + SQuery *pQuery = pRuntimeEnv->pQuery; + pQuery->slot = -1; + + TSKEY key = pQuery->lastKey; + + SData *primaryColBuffer = pRuntimeEnv->primaryColBuffer; + pQuery->fileId = getFileIdFromKey(pMeterObj->vnode, key) - step; + + while (1) { + if ((fid = getNextDataFileCompInfo(pRuntimeEnv, pMeterObj, step)) < 0) { + break; + } + + blkIdx = binarySearchForBlock(pQuery, key); + + if (type == QUERY_RANGE_GREATER_EQUAL) { + if (key <= pQuery->pBlock[blkIdx].keyLast) { + break; + } else { + blkIdx = -1; + } + } else { + if (key >= pQuery->pBlock[blkIdx].keyFirst) { + break; + } else { + blkIdx = -1; + } + } + } + + /* failed to find qualified point in file, abort */ + if (blkIdx == -1) { + return false; + } + + assert(blkIdx >= 0 && blkIdx < pQuery->numOfBlocks); + + // load first data block into memory failed, caused by disk block error + bool blockLoaded = false; + while (blkIdx < pQuery->numOfBlocks) { + pQuery->slot = blkIdx; + if (loadDataBlockIntoMem(&pQuery->pBlock[pQuery->slot], &pQuery->pFields[pQuery->slot], pRuntimeEnv, fid, true, + true) == 0) { + SET_DATA_BLOCK_LOADED(pRuntimeEnv->blockStatus); + blockLoaded = true; + break; + } + + dError("QInfo:%p fileId:%d total numOfBlks:%d blockId:%d into memory failed due to error in disk files", + GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->numOfBlocks, blkIdx); + blkIdx += step; + } + + // failed to load data from disk, abort current query + if (blockLoaded == false) { + return false; + } + + SCompBlock *pBlocks = getDiskDataBlock(pQuery, blkIdx); + + /* + * search qualified points in blk, according to primary key (timestamp) column + */ + pQuery->pos = searchFn(primaryColBuffer->data, pBlocks->numOfPoints, key, pQuery->order.order); + assert(pQuery->pos >= 0 && pQuery->fileId >= 0 && pQuery->slot >= 0); + + return true; +} + +static char *doGetDataBlockImpl(const char *sdata, int32_t colIdx, bool isDiskFileBlock) { + if (isDiskFileBlock) { + return ((SData **)sdata)[colIdx]->data; + } else { + return ((SCacheBlock *)sdata)->offset[colIdx]; + } +} + +static SField *getFieldInfo(SQuery *pQuery, SBlockInfo *pBlockInfo, SField *pFields, int32_t column) { + // no SField info exist, or column index larger than the output column, no result. + if (pFields == NULL || column >= pQuery->numOfOutputCols) { + return NULL; + } + + SColIndexEx *pColIndexEx = &pQuery->pSelectExpr[column].pBase.colInfo; + + // for a tag column, no corresponding field info + if (pColIndexEx->isTag) { + return NULL; + } + + /* + * Choose the right column field info by field id, since the file block may be out of date, + * which means the newest table schema is not equalled to the schema of this block. + */ + for (int32_t i = 0; i < pBlockInfo->numOfCols; ++i) { + if (pColIndexEx->colId == pFields[i].colId) { + return &pFields[i]; + } + } + + return NULL; +} + +/* + * not null data in two cases: + * 1. tags data: isTag == true; + * 2. 
data locate in file, numOfNullPoints == 0 or pFields does not needed to be loaded + */ +static bool hasNullVal(SQuery *pQuery, int32_t col, SBlockInfo *pBlockInfo, SField *pFields, bool isDiskFileBlock) { + bool ret = true; + + if (pQuery->pSelectExpr[col].pBase.colInfo.isTag) { + ret = false; + } else if (isDiskFileBlock) { + if (pFields == NULL) { + ret = false; + } else { + SField *pField = getFieldInfo(pQuery, pBlockInfo, pFields, col); + if (pField != NULL && pField->numOfNullPoints == 0) { + ret = false; + } + } + } + + return ret; +} + +static char *doGetDataBlocks(bool isDiskFileBlock, SQueryRuntimeEnv *pRuntimeEnv, char *data, int32_t colIdx, + int32_t colId, int16_t type, int16_t bytes, int32_t tmpBufIndex) { + char *pData = NULL; + + if (isDiskFileBlock) { + pData = doGetDataBlockImpl(data, colIdx, isDiskFileBlock); + } else { + SCacheBlock* pCacheBlock = (SCacheBlock*) data; + SMeterObj* pMeter = pRuntimeEnv->pMeterObj; + + if (colIdx < 0 || pMeter->numOfColumns <= colIdx || pMeter->schema[colIdx].colId != colId) { + /* data in cache is not current available, we need fill the data block in null value */ + pData = pRuntimeEnv->colDataBuffer[tmpBufIndex]->data; + setNullN(pData, type, bytes, pCacheBlock->numOfPoints); + } else { + pData = doGetDataBlockImpl(data, colIdx, isDiskFileBlock); + } + } + + return pData; +} + +static char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, char *data, SArithmeticSupport *sas, int32_t col, + bool isDiskFileBlock) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + + char *dataBlock = NULL; + + int32_t functionId = pQuery->pSelectExpr[col].pBase.functionId; + + if (functionId == TSDB_FUNC_ARITHM) { + sas->pExpr = &pQuery->pSelectExpr[col]; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + pCtx->startOffset = pQuery->pos; // set the offset value + } else { + pCtx->startOffset = pQuery->pos - pCtx->size + 1; + } + + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + int32_t colIdx = isDiskFileBlock ? pQuery->colList[i].colIdxInBuf : pQuery->colList[i].colIdx; + + SColumnFilterMsg *pColMsg = &pQuery->colList[i].data; + char *pData = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pColMsg->colId, pColMsg->type, + pColMsg->bytes, pQuery->colList[i].colIdxInBuf); + + sas->elemSize[i] = pColMsg->bytes; + sas->data[i] = pData + pCtx->startOffset * sas->elemSize[i]; // start from the offset + } + sas->numOfCols = pQuery->numOfCols; + sas->offset = 0; + } else { // other type of query function + SColIndexEx *pCol = &pQuery->pSelectExpr[col].pBase.colInfo; + int32_t colIdx = isDiskFileBlock ? pCol->colIdxInBuf : pCol->colIdx; + + if (pCol->isTag) { + dataBlock = NULL; + } else { + /* + * the colIdx is acquired from the first meter of all qualified meters in this vnode during query prepare stage, + * the remain meter may not have the required column in cache actually. + * So, the validation of required column in cache with the corresponding meter schema is reinforced. + */ + dataBlock = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pCol->colId, pCtx[col].inputType, + pCtx[col].inputBytes, pCol->colIdxInBuf); + } + } + + return dataBlock; +} + +/** + * + * @param pRuntimeEnv + * @param forwardStep + * @param primaryKeyCol + * @param data + * @param pFields + * @param isDiskFileBlock + * @return the incremental number of output value, so it maybe 0 for fixed number of query, + * such as count/min/max etc. 
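+ *         For example, count(*) keeps a single accumulated output row, so applying it to one more block
+ *         adds 0 new rows, while a plain projection adds one output row per qualified input row.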
+ */ +static int32_t applyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t forwardStep, TSKEY *primaryKeyCol, char *data, + SField *pFields, SBlockInfo *pBlockInfo, bool isDiskFileBlock) { + SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + SQuery * pQuery = pRuntimeEnv->pQuery; + + int64_t prevNumOfRes = getNumOfResult(pRuntimeEnv); + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + if (pRuntimeEnv->go[k]) { + SField dummyField = {0}; + SArithmeticSupport sas = {0}; + + // todo refactor + int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; + + if (!IS_MASTER_SCAN(pRuntimeEnv) && + !(functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST || + functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) { + continue; + } + + bool hasNull = hasNullVal(pQuery, k, pBlockInfo, pFields, isDiskFileBlock); + char *dataBlock = getDataBlocks(pRuntimeEnv, data, &sas, k, isDiskFileBlock); + + SField *tpField = NULL; + + if (pFields != NULL) { + tpField = getFieldInfo(pQuery, pBlockInfo, pFields, k); + /* + * Field info not exist, the required column is not present in current block, + * so all data must be null value in current block. + */ + if (tpField == NULL) { + tpField = &dummyField; + tpField->numOfNullPoints = (int32_t)forwardStep; + } + } + + TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; + + int64_t alignedTimestamp = taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit); + setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, forwardStep, functionId, + tpField, hasNull, pRuntimeEnv->blockStatus, &sas, pRuntimeEnv->scanFlag); + + pRuntimeEnv->go[k] = aAggs[functionId].xFunction(&pCtx[k]); + } + } + + int64_t numOfIncrementRes = getNumOfResult(pRuntimeEnv) - prevNumOfRes; + validateTimestampForSupplementResult(pRuntimeEnv, numOfIncrementRes); + + return (int32_t)numOfIncrementRes; +} + +/** + * if sfields is null + * 1. count(*)/spread(ts) is invoked + * 2. this column does not exists + * + * first filter the data block according to the value filter condition, then, if + * the top/bottom query applied, invoke the filter function to decide if the data block need to be accessed or not. 
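+ * For example, for a filter such as "col > 10", if the pre-aggregated max value recorded in the SField of
+ * this block is not greater than 10, the block is rejected here and its column data are never loaded.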
+ * @param pQuery + * @param pField + * @return + */ +static bool needToLoadDataBlock(SQuery *pQuery, SField *pField, SQLFunctionCtx *pCtx) { + if (pField == NULL) { + return false; // no need to load data + } + + for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { + SColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; + int32_t colIndex = pFilterInfo->pFilter.colIdx; + + // this column not valid in current data block + if (colIndex < 0 || pField[colIndex].colId != pFilterInfo->pFilter.data.colId) { + continue; + } + + // not support pre-filter operation on binary/nchar data type + if (!vnodeSupportPrefilter(pFilterInfo->pFilter.data.type)) { + continue; + } + + if (pFilterInfo->pFilter.data.type == TSDB_DATA_TYPE_FLOAT) { + float minval = *(double *)(&pField[colIndex].min); + float maxval = *(double *)(&pField[colIndex].max); + + if (!pFilterInfo->fp(&pFilterInfo->pFilter, (char *)&minval, (char *)&maxval)) { + return false; + } + } else { + if (!pFilterInfo->fp(&pFilterInfo->pFilter, (char *)&pField[colIndex].min, (char *)&pField[colIndex].max)) { + return false; + } + } + } + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functId = pQuery->pSelectExpr[i].pBase.functionId; + if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_TOP_DST || functId == TSDB_FUNC_BOTTOM || + functId == TSDB_FUNC_BOTTOM_DST) { + return top_bot_datablock_filter(&pCtx[i], functId, (char *)&pField[i].min, (char *)&pField[i].max); + } + } + + return true; +} + +static int32_t applyAllFunctions_Filter(SQueryRuntimeEnv *pRuntimeEnv, int32_t *forwardStep, TSKEY *primaryKeyCol, + char *data, SField *pFields, SBlockInfo *pBlockInfo, bool isDiskFileBlock) { + SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + SQuery * pQuery = pRuntimeEnv->pQuery; + + int64_t prevNumOfRes = getNumOfResult(pRuntimeEnv); + + SArithmeticSupport *sasArray = calloc(pQuery->numOfOutputCols, sizeof(SArithmeticSupport)); + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId; + + bool hasNull = hasNullVal(pQuery, k, pBlockInfo, pFields, isDiskFileBlock); + char *dataBlock = getDataBlocks(pRuntimeEnv, data, &sasArray[k], k, isDiskFileBlock); + + TSKEY ts = QUERY_IS_ASC_QUERY(pQuery) ? pQuery->skey : pQuery->ekey; + int64_t alignedTimestamp = taosGetIntervalStartTimestamp(ts, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit); + + setExecParams(pQuery, &pCtx[k], alignedTimestamp, dataBlock, (char *)primaryKeyCol, (*forwardStep), functionId, + pFields, hasNull, pRuntimeEnv->blockStatus, &sasArray[k], pRuntimeEnv->scanFlag); + } + + // set the input column data + for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { + SColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; + int32_t colIdx = isDiskFileBlock ? 
pFilterInfo->pFilter.colIdxInBuf : pFilterInfo->pFilter.colIdx; + SColumnFilterMsg * pFilterMsg = &pFilterInfo->pFilter.data; + /* NOTE: here the tbname/tags column cannot reach here, so we do NOT check if is a tag or not */ + pFilterInfo->pData = doGetDataBlocks(isDiskFileBlock, pRuntimeEnv, data, colIdx, pFilterMsg->colId, + pFilterMsg->type, pFilterMsg->bytes, pFilterInfo->pFilter.colIdxInBuf); + } + + int32_t numOfRes = 0; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + // from top to bottom in desc + // from bottom to top in asc order + for (int32_t j = 0; j < (*forwardStep); ++j) { + if (!vnodeDoFilterData(pQuery, pQuery->pos + j * step)) { + continue; + } + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + if (pRuntimeEnv->go[k]) { + int32_t offset = pQuery->pos + j * step; + offset -= pCtx[k].startOffset; + + pRuntimeEnv->go[k] = aAggs[pQuery->pSelectExpr[k].pBase.functionId].xFunctionF(&pCtx[k], offset); + } + } + + /* + * pointsOffset is the maximum available space in result buffer + * update the actual forward step for query that requires checking buffer during loop + */ + if ((pQuery->checkBufferInLoop == 1) && (++numOfRes) >= pQuery->pointsOffset) { + pQuery->lastKey = primaryKeyCol[pQuery->pos + j * step] + step; + *forwardStep = j + 1; + break; + } + } + + free(sasArray); + + return getNumOfResult(pRuntimeEnv) - prevNumOfRes; +} + +static int32_t getForwardStepsInBlock(int32_t numOfPoints, __block_search_fn_t searchFn, SQuery *pQuery, + int64_t *pData) { + int32_t endPos = searchFn((char *)pData, numOfPoints, pQuery->ekey, pQuery->order.order); + int32_t forwardStep = 0; + + if (endPos >= 0) { + forwardStep = QUERY_IS_ASC_QUERY(pQuery) ? (endPos - pQuery->pos) : (pQuery->pos - endPos); + assert(forwardStep >= 0); + + /* endPos data is equalled to the key so, we do need to read the element in + * endPos */ + if (pData[endPos] == pQuery->ekey) { + forwardStep += 1; + } + } + return forwardStep; +} + +static int32_t reviseForwardSteps(SQuery *pQuery, int32_t forwardStep) { + /* + * if value filter exists, we try all data in current block, and do not set the QUERY_RESBUF_FULL flag. + * + * in handing of top/bottom query, the checkBufferInLoop == 1 and pQuery->numOfFilterCols may be 0 or not, + * so we have to exclude the query of top/bottom from checking for buffer status. 
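+   * For example, if only 100 slots remain in the result buffer (pQuery->pointsOffset == 100) while the
+   * current block could contribute 500 rows, forwardStep is capped to 100 and the rest is left for the next loop.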
+ */ + if (pQuery->checkBufferInLoop == 1 && pQuery->numOfFilterCols == 0 && !isTopBottomQuery(pQuery)) { + /* current buffer does not have enough space, try in the next loop */ + if (pQuery->pointsOffset <= forwardStep) { + forwardStep = pQuery->pointsOffset; + } + } + return forwardStep; +} + +static void validateQueryRangeAndData(SQueryRuntimeEnv *pRuntimeEnv, const TSKEY *pPrimaryColumn, + SBlockInfo *pBlockBasicInfo) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + TSKEY startKey = -1; + // timestamp qualification check + if (IS_DATA_BLOCK_LOADED(pRuntimeEnv->blockStatus) && needPrimaryTimestampCol(pQuery, pBlockBasicInfo)) { + startKey = pPrimaryColumn[pQuery->pos]; + } else { + startKey = pBlockBasicInfo->keyFirst; + TSKEY endKey = pBlockBasicInfo->keyLast; + + assert((endKey <= pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (endKey >= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))); + } + + assert((startKey >= pQuery->lastKey && startKey <= pQuery->ekey && pQuery->skey <= pQuery->lastKey && + QUERY_IS_ASC_QUERY(pQuery)) || + (startKey <= pQuery->lastKey && startKey >= pQuery->ekey && pQuery->skey >= pQuery->lastKey && + !QUERY_IS_ASC_QUERY(pQuery))); +} + +static int32_t applyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pBlockInfo, int64_t *pPrimaryColumn, + char *sdata, SField *pFields, __block_search_fn_t searchFn, int32_t *numOfRes) { + int32_t forwardStep = 0; + SQuery *pQuery = pRuntimeEnv->pQuery; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + validateQueryRangeAndData(pRuntimeEnv, pPrimaryColumn, pBlockInfo); + + if (QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->ekey < pBlockInfo->keyLast) { + forwardStep = getForwardStepsInBlock(pBlockInfo->size, searchFn, pQuery, pPrimaryColumn); + assert(forwardStep >= 0); + + if (forwardStep == 0) { + /* no qualified data in current block, do not update the lastKey value + */ + assert(pQuery->ekey < pPrimaryColumn[pQuery->pos]); + } else { + pQuery->lastKey = pPrimaryColumn[pQuery->pos + (forwardStep - 1)] + step; + } + + } else { + forwardStep = pBlockInfo->size - pQuery->pos; + assert(forwardStep > 0); + + pQuery->lastKey = pBlockInfo->keyLast + step; + } + } else { // desc + if (pQuery->ekey > pBlockInfo->keyFirst) { + forwardStep = getForwardStepsInBlock(pBlockInfo->size, searchFn, pQuery, pPrimaryColumn); + assert(forwardStep >= 0); + + if (forwardStep == 0) { + /* no qualified data in current block, do not update the lastKey value + */ + assert(pQuery->ekey > pPrimaryColumn[pQuery->pos]); + } else { + pQuery->lastKey = pPrimaryColumn[pQuery->pos - (forwardStep - 1)] + step; + } + } else { + forwardStep = pQuery->pos + 1; + assert(forwardStep > 0); + + pQuery->lastKey = pBlockInfo->keyFirst + step; + } + } + + int32_t newForwardStep = reviseForwardSteps(pQuery, forwardStep); + assert(newForwardStep <= forwardStep && newForwardStep >= 0); + + /* if buffer limitation is applied, there must be primary column(timestamp) loaded */ + if (newForwardStep < forwardStep && newForwardStep > 0) { + pQuery->lastKey = pPrimaryColumn[pQuery->pos + (newForwardStep - 1) * step] + step; + } + + bool isFileBlock = IS_FILE_BLOCK(pRuntimeEnv->blockStatus); + if (pQuery->numOfFilterCols > 0) { + *numOfRes = + applyAllFunctions_Filter(pRuntimeEnv, &newForwardStep, pPrimaryColumn, sdata, pFields, pBlockInfo, isFileBlock); + } else { + *numOfRes = applyAllFunctions(pRuntimeEnv, newForwardStep, pPrimaryColumn, sdata, pFields, pBlockInfo, isFileBlock); + } + + assert(*numOfRes >= 0); + + // check if buffer is large enough 
for accommodating all qualified points + if (*numOfRes > 0 && pQuery->checkBufferInLoop == 1) { + pQuery->pointsOffset -= *numOfRes; + if (pQuery->pointsOffset == 0) { + setQueryStatus(pQuery, QUERY_RESBUF_FULL); + } + } + + return newForwardStep; +} + +int32_t vnodeGetVnodeHeaderFileIdx(int32_t *fid, SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { + if (pRuntimeEnv->numOfFiles == 0) { + return -1; + } + + /* set the initial file for current query */ + if (order == TSQL_SO_ASC && *fid < pRuntimeEnv->pHeaderFiles[0].fileID) { + *fid = pRuntimeEnv->pHeaderFiles[0].fileID; + return 0; + } else if (order == TSQL_SO_DESC && *fid > pRuntimeEnv->pHeaderFiles[pRuntimeEnv->numOfFiles - 1].fileID) { + *fid = pRuntimeEnv->pHeaderFiles[pRuntimeEnv->numOfFiles - 1].fileID; + return pRuntimeEnv->numOfFiles - 1; + } + + int32_t numOfFiles = pRuntimeEnv->numOfFiles; + int32_t step = (order == TSQL_SO_ASC) ? 1 : -1; + + if (order == TSQL_SO_DESC && *fid > pRuntimeEnv->pHeaderFiles[numOfFiles - 1].fileID) { + *fid = pRuntimeEnv->pHeaderFiles[numOfFiles - 1].fileID; + return numOfFiles - 1; + } + + int32_t i = (order == TSQL_SO_ASC) ? 0 : numOfFiles - 1; + while ((i < numOfFiles) && (i >= 0) && (*fid != pRuntimeEnv->pHeaderFiles[i].fileID)) { + i += step; + } + + if (i == numOfFiles || i < 0) { + return -1; + } else { + *fid = pRuntimeEnv->pHeaderFiles[i].fileID; + return i; + } +} + +int32_t getNextDataFileCompInfo(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, int32_t step) { + SQuery *pQuery = pRuntimeEnv->pQuery; + pQuery->fileId += step; + + int32_t fid = 0; + int32_t order = (step == QUERY_ASC_FORWARD_STEP) ? TSQL_SO_ASC : TSQL_SO_DESC; + while (1) { + fid = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, order); + + // no files left, abort + if (fid < 0) { + if (step == QUERY_ASC_FORWARD_STEP) { + dTrace("QInfo:%p no file to access, try data in cache", GET_QINFO_ADDR(pQuery)); + } else { + dTrace("QInfo:%p no file to access in desc order, query completed", GET_QINFO_ADDR(pQuery)); + } + + vnodeFreeFieldsEx(pRuntimeEnv); + pQuery->fileId = -1; + break; + } + + if (vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fid) > 0) { + break; + } + + /* + * 1. failed to read blk information from header file or open data file failed + * 2. header file is empty + * + * try next one + */ + pQuery->fileId += step; + + /* for backwards search, if the first file is not valid, abort */ + if (step < 0 && fid == 0) { + vnodeFreeFieldsEx(pRuntimeEnv); + pQuery->fileId = -1; + fid = -1; + break; + } + } + + return fid; +} + +void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, int64_t StartQueryTimestamp, void *inputData, + char *primaryColumnData, int32_t size, int32_t functionId, SField *pField, bool hasNull, + int32_t blockStatus, void *param, int32_t scanFlag) { + int32_t startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? 
pQuery->pos : pQuery->pos - (size - 1); + + pCtx->nStartQueryTimestamp = StartQueryTimestamp; + pCtx->scanFlag = scanFlag; + + pCtx->aInputElemBuf = inputData; + pCtx->hasNullValue = hasNull; + pCtx->blockStatus = blockStatus; + + if (pField != NULL) { + pCtx->preAggVals.isSet = true; + pCtx->preAggVals.wsum = pField->wsum; + pCtx->preAggVals.sum = pField->sum; + pCtx->preAggVals.max = pField->max; + pCtx->preAggVals.min = pField->min; + pCtx->preAggVals.numOfNullPoints = pField->numOfNullPoints; + } else { + pCtx->preAggVals.isSet = false; + } + + if (functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) { + // last_dist or first_dist function + // store the first&last timestamp into the intermediate buffer [1], the true + // value may be null but timestamp will never be null + pCtx->ptsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE); + } else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_WAVG || + functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_TOP_DST || functionId == TSDB_FUNC_BOTTOM_DST) { + /* + * leastsquares function needs two columns of input, + * currently, the x value of linear equation is set to timestamp column, and the y-value is the column specified in + * pQuery->pSelectExpr[i].colIdxInBuffer + * + * top/bottom function needs timestamp to indicate when the + * top/bottom values emerge, so does diff function + * + */ + if (functionId == TSDB_FUNC_WAVG) { + pCtx->intermediateBuf[3].nType = TSDB_DATA_TYPE_TIMESTAMP; + pCtx->intermediateBuf[3].i64Key = pQuery->ekey; + } + + pCtx->ptsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE); + + } else if (functionId == TSDB_FUNC_ARITHM) { + pCtx->param[0].pz = param; + } else if (functionId == TSDB_FUNC_WAVG_DST) { + ((SWavgRuntime *)pCtx->aOutputBuf)->sKey = StartQueryTimestamp; + ((SWavgRuntime *)pCtx->aOutputBuf)->eKey = pQuery->ekey; + + pCtx->ptsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE); + } + + pCtx->startOffset = startOffset; + pCtx->size = size; + +#if defined(_DEBUG_VIEW) + int64_t *tsList = (int64_t *)(primaryColumnData + startOffset * TSDB_KEYSIZE); + int64_t s = tsList[0]; + int64_t e = tsList[size - 1]; + +// if (IS_DATA_BLOCK_LOADED(blockStatus)) { +// dTrace("QInfo:%p query ts:%lld-%lld, offset:%d, rows:%d, bstatus:%d, +// functId:%d", GET_QINFO_ADDR(pQuery), +// s, e, startOffset, size, blockStatus, functionId); +// } else { +// dTrace("QInfo:%p block not loaded, bstatus:%d", +// GET_QINFO_ADDR(pQuery), blockStatus); +// } +#endif +} + +static int32_t setupQueryRuntimeEnv(SMeterObj *pMeterObj, SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv, + SSchema *pTagsSchema, int16_t order) { + dTrace("QInfo:%p setup runtime env", GET_QINFO_ADDR(pQuery)); + + pRuntimeEnv->pMeterObj = pMeterObj; + pRuntimeEnv->pQuery = pQuery; + + //todo free all allocated resource + pRuntimeEnv->go = (bool *)malloc(sizeof(bool) * pQuery->numOfOutputCols); + if (pRuntimeEnv->go == NULL) { + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + memset(pRuntimeEnv->go, 1, sizeof(bool) * pQuery->numOfOutputCols); + + pRuntimeEnv->pCtx = (SQLFunctionCtx *)calloc(pQuery->numOfOutputCols, sizeof(SQLFunctionCtx)); + + pRuntimeEnv->offset[0] = 0; + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + SSqlFuncExprMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].pBase; + SColIndexEx * pColIndexEx = &pSqlFuncMsg->colInfo; + + SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; + + if (pSqlFuncMsg->colInfo.isTag) { // process tag column info + 
pCtx->inputType = pTagsSchema[pColIndexEx->colIdx].type; + pCtx->inputBytes = pTagsSchema[pColIndexEx->colIdx].bytes; + } else { + pCtx->inputType = GET_COLUMN_TYPE(pQuery, i); + pCtx->inputBytes = GET_COLUMN_BYTES(pQuery, i); + } + + pCtx->ptsOutputBuf = NULL; + + pCtx->outputBytes = pQuery->pSelectExpr[i].resBytes; + pCtx->outputType = pQuery->pSelectExpr[i].resType; + pCtx->order = pQuery->order.order; + + pCtx->numOfParams = pSqlFuncMsg->numOfParams; + for (int32_t j = 0; j < pCtx->numOfParams; ++j) { + /* + * tricky: in case of char array parameters, we employ the shallow copy method + * and get the ownership of the char array, it later release the allocated memory if exists + */ + pCtx->param[j].nType = pSqlFuncMsg->arg[j].argType; + pCtx->param[j].i64Key = pSqlFuncMsg->arg[j].argValue.i64; + } + + /* set the order information for top/bottom query */ + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { + assert(pQuery->pSelectExpr[0].pBase.functionId == TSDB_FUNC_TS || + pQuery->pSelectExpr[0].pBase.functionId == TSDB_FUNC_TS_DUMMY); + + pCtx->param[2].i64Key = order; + pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT; + pCtx->param[3].i64Key = functionId; + pCtx->param[3].nType = TSDB_DATA_TYPE_BIGINT; + + pCtx->param[1].i64Key = pQuery->order.orderColId; + } + + if (i > 0) { + pRuntimeEnv->offset[i] = pRuntimeEnv->offset[i - 1] + pRuntimeEnv->pCtx[i - 1].outputBytes; + } + } + + resetCtxOutputBuf(pRuntimeEnv); + initCtxOutputBuf(pRuntimeEnv); + + /* for loading block data in memory */ + assert(vnodeList[pMeterObj->vnode].cfg.rowsInFileBlock == pMeterObj->pointsPerFileBlock); + + pRuntimeEnv->buffer = + (char *)malloc(pQuery->dataRowSize * pMeterObj->pointsPerFileBlock + sizeof(SData) * pQuery->numOfCols); + + if (pRuntimeEnv->buffer == NULL) { + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + pRuntimeEnv->colDataBuffer[0] = (SData *)pRuntimeEnv->buffer; + for (int32_t i = 1; i < pQuery->numOfCols; ++i) { + int32_t bytes = pQuery->colList[i - 1].data.bytes; + pRuntimeEnv->colDataBuffer[i] = + (SData *)(((void *)pRuntimeEnv->colDataBuffer[i - 1]) + sizeof(SData) + pMeterObj->pointsPerFileBlock * bytes); + } + + /* record the maximum column width among columns of this meter/metric */ + int32_t maxColWidth = pQuery->colList[0].data.bytes; + for (int32_t i = 1; i < pQuery->numOfCols; ++i) { + int32_t bytes = pQuery->colList[i].data.bytes; + if (bytes > maxColWidth) { + maxColWidth = bytes; + } + } + + pRuntimeEnv->primaryColBuffer = NULL; + if (PRIMARY_TSCOL_LOADED(pQuery)) { + pRuntimeEnv->primaryColBuffer = pRuntimeEnv->colDataBuffer[0]; + } else { + pRuntimeEnv->primaryColBuffer = (SData *)malloc(pMeterObj->pointsPerFileBlock * TSDB_KEYSIZE + sizeof(SData)); + } + + pRuntimeEnv->internalBufSize = (size_t) (maxColWidth*pMeterObj->pointsPerFileBlock + EXTRA_BYTES);//plus extra_bytes + + pRuntimeEnv->unzipBuffer = (char *)malloc(pRuntimeEnv->internalBufSize); + pRuntimeEnv->secondaryUnzipBuffer = (char *)calloc(1, pRuntimeEnv->internalBufSize); + + if (pRuntimeEnv->unzipBuffer == NULL) { + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + if (pRuntimeEnv->secondaryUnzipBuffer == NULL) { + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + return TSDB_CODE_SUCCESS; +} + +static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { + /* query runtime env is not called */ + if (pRuntimeEnv->pQuery == NULL) { + return; + } + + dTrace("QInfo:%p teardown runtime env", 
GET_QINFO_ADDR(pRuntimeEnv->pQuery)); + tfree(pRuntimeEnv->buffer); + tfree(pRuntimeEnv->secondaryUnzipBuffer); + tfree(pRuntimeEnv->go); + + if (pRuntimeEnv->pCtx != NULL) { + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { + SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; + + for (int32_t j = 0; j < pCtx->numOfParams; ++j) { + tVariantDestroy(&pCtx->param[j]); + } + + for (int32_t j = 0; j < tListLen(pCtx->intermediateBuf); ++j) { + tVariantDestroy(&pCtx->intermediateBuf[j]); + } + } + + tfree(pRuntimeEnv->pCtx); + } + + tfree(pRuntimeEnv->unzipBuffer); + + if (pRuntimeEnv->pQuery && (!PRIMARY_TSCOL_LOADED(pRuntimeEnv->pQuery))) { + tfree(pRuntimeEnv->primaryColBuffer); + } + + for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) { + SQueryFileInfo *pQFileInfo = &(pRuntimeEnv->pHeaderFiles[i]); + if (pQFileInfo->pHeaderFileData != NULL && pQFileInfo->pHeaderFileData != MAP_FAILED) { + munmap(pQFileInfo->pHeaderFileData, pQFileInfo->headFileSize); + } + tclose(pQFileInfo->headerFd); + + if (pQFileInfo->pDataFileData != NULL && pQFileInfo->pDataFileData != MAP_FAILED) { + munmap(pQFileInfo->pDataFileData, pQFileInfo->defaultMappingSize); + } + + tclose(pQFileInfo->dataFd); + tclose(pQFileInfo->lastFd); + } + + if (pRuntimeEnv->pHeaderFiles != NULL) { + pRuntimeEnv->numOfFiles = 0; + free(pRuntimeEnv->pHeaderFiles); + } + + if (pRuntimeEnv->pInterpoBuf != NULL) { + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { + tfree(pRuntimeEnv->pInterpoBuf[i]); + } + + tfree(pRuntimeEnv->pInterpoBuf); + } +} + +// get maximum time interval in each file +static int64_t getOldestKey(int32_t numOfFiles, int64_t fileId, SVnodeCfg *pCfg) { + int64_t duration = pCfg->daysPerFile * tsMsPerDay[pCfg->precision]; + return (fileId - numOfFiles + 1) * duration; +} + +bool isQueryKilled(SQuery *pQuery) { + SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + return (pQInfo->killed == 1); +} + +bool isFixedOutputQuery(SQuery *pQuery) { + if (pQuery->nAggTimeInterval != 0) { + return false; + } + + // Note:top/bottom query is fixed output query + if (isTopBottomQuery(pQuery)) { + return true; + } + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (IS_MULTIOUTPUT(aAggs[functionId].nStatus)) { + return false; + } + } + + return true; +} + +bool isPointInterpoQuery(SQuery *pQuery) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionID = pQuery->pSelectExpr[i].pBase.functionId; + if (functionID == TSDB_FUNC_INTERP || functionID == TSDB_FUNC_LAST_ROW || functionID == TSDB_FUNC_LAST_ROW_DST) { + return true; + } + } + + return false; +} + +// TODO REFACTOR:MERGE WITH CLIENT-SIDE FUNCTION +bool isTopBottomQuery(SQuery *pQuery) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (functionId == TSDB_FUNC_TS) { + continue; + } + + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TOP_DST || + functionId == TSDB_FUNC_BOTTOM_DST) { + return true; + } + } + + return false; +} + +bool isFirstLastRowQuery(SQuery *pQuery) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionID = pQuery->pSelectExpr[i].pBase.functionId; + if (functionID == TSDB_FUNC_LAST_ROW || functionID == TSDB_FUNC_LAST_ROW_DST) { + return true; + } + } + + return false; +} + +bool needSupplementaryScan(SQuery *pQuery) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + 
int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) { + continue; + } + + if (((functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) && QUERY_IS_ASC_QUERY(pQuery)) || + ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && !QUERY_IS_ASC_QUERY(pQuery))) { + return true; + } + } + + return false; +} +///////////////////////////////////////////////////////////////////////////////////////////// +static int32_t binarySearchInCacheBlk(SCacheInfo *pCacheInfo, SQuery *pQuery, int32_t keyLen, int32_t firstSlot, + int32_t lastSlot) { + int32_t midSlot = 0; + + while (1) { + int32_t numOfBlocks = (lastSlot - firstSlot + 1 + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; + if (numOfBlocks == 0) { + numOfBlocks = pCacheInfo->maxBlocks; + } + + midSlot = (firstSlot + (numOfBlocks >> 1)) % pCacheInfo->maxBlocks; + SCacheBlock *pBlock = pCacheInfo->cacheBlocks[midSlot]; + + TSKEY keyFirst = *((TSKEY *)pBlock->offset[0]); + TSKEY keyLast = *((TSKEY *)(pBlock->offset[0] + (pBlock->numOfPoints - 1) * keyLen)); + + if (numOfBlocks == 1) { + break; + } + + if (pQuery->skey > keyLast) { + if (numOfBlocks == 2) break; + if (!QUERY_IS_ASC_QUERY(pQuery)) { + int nextSlot = (midSlot + 1 + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; + SCacheBlock *pNextBlock = pCacheInfo->cacheBlocks[nextSlot]; + TSKEY nextKeyFirst = *((TSKEY *)(pNextBlock->offset[0])); + if (pQuery->skey < nextKeyFirst) break; + } + firstSlot = (midSlot + 1) % pCacheInfo->maxBlocks; + } else if (pQuery->skey < keyFirst) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + int prevSlot = (midSlot - 1 + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; + SCacheBlock *pPrevBlock = pCacheInfo->cacheBlocks[prevSlot]; + TSKEY prevKeyLast = *((TSKEY *)(pPrevBlock->offset[0] + (pPrevBlock->numOfPoints - 1) * keyLen)); + if (pQuery->skey > prevKeyLast) { + break; + } + } + lastSlot = (midSlot - 1 + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; + } else { + break; // got the slot + } + } + + return midSlot; +} + +static void getQueryRange(SQuery *pQuery, TSKEY *min, TSKEY *max) { + *min = pQuery->lastKey < pQuery->ekey ? pQuery->lastKey : pQuery->ekey; + *max = pQuery->lastKey >= pQuery->ekey ? pQuery->lastKey : pQuery->ekey; +} + +static int32_t getFirstCacheSlot(int32_t numOfBlocks, int32_t lastSlot, SCacheInfo *pCacheInfo) { + return (lastSlot - numOfBlocks + 1 + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; +} + +static bool cacheBoundaryCheck(SQuery *pQuery, SMeterObj *pMeterObj) { + /* + * here we get the first slot from the meter cache, not from the cache snapshot from pQuery, since the + * snapshot value in pQuery may have been expired now. + */ + SCacheInfo * pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + SCacheBlock *pBlock = NULL; + + // earliest key in cache + TSKEY keyFirst = 0; + TSKEY keyLast = pMeterObj->lastKey; + + while (1) { + // keep the value in local variable, since it may be changed by other thread any time + int32_t numOfBlocks = pCacheInfo->numOfBlocks; + int32_t currentSlot = pCacheInfo->currentSlot; + + // no data in cache, return false directly + if (numOfBlocks == 0) { + return false; + } + + int32_t first = getFirstCacheSlot(numOfBlocks, currentSlot, pCacheInfo); + + /* + * pBlock may be null value since this block is flushed to disk, and re-distributes to + * other meter, so go on until we get the first not flushed cache block. 
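+     * The surrounding loop therefore re-reads the cache snapshot (numOfBlocks/currentSlot) and retries
+     * until a valid block is found, or returns false when only a single (possibly empty) block remains.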
+ */ + if ((pBlock = getCacheDataBlock(pMeterObj, pQuery, first)) != NULL) { + keyFirst = getTimestampInCacheBlock(pBlock, 0); + break; + } else { + /* + * there may be only one empty cache block existed caused by import. + */ + if (numOfBlocks == 1) { + return false; + } + } + } + + TSKEY min, max; + getQueryRange(pQuery, &min, &max); + + // the query time range is earlier than the first element in cache. abort + if (max < keyFirst) { + setQueryStatus(pQuery, QUERY_COMPLETED); + return false; + } + + if (min > keyLast) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return false; + } + + return true; +} + +void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid) { + // commitSlot here denotes the first uncommitted block in cache + int32_t numOfBlocks = 0; + int32_t lastSlot = 0; + + SCachePool *pPool = (SCachePool *)vnodeList[vid].pCachePool; + pthread_mutex_lock(&pPool->vmutex); + numOfBlocks = pCacheInfo->numOfBlocks; + lastSlot = pCacheInfo->currentSlot; + pthread_mutex_unlock(&pPool->vmutex); + + // make sure it is there, otherwise, return right away + pQuery->currentSlot = lastSlot; + pQuery->numOfBlocks = numOfBlocks; + pQuery->firstSlot = getFirstCacheSlot(numOfBlocks, lastSlot, pCacheInfo); + ; + + /* + * Note: the block id is continuous increasing, never becomes smaller. + * + * blockId is the maximum block id in cache of current meter during query. + * If any blocks' id are greater than this value, those blocks may be reallocated to other meters, + * or assigned new data of this meter, on which the query is performed should be ignored. + */ + if (pQuery->numOfBlocks > 0) { + pQuery->blockId = pCacheInfo->cacheBlocks[pQuery->currentSlot]->blockId; + } +} + +int64_t getQueryStartPositionInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t *slot, int32_t *pos, + bool ignoreQueryRange) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + + pQuery->fileId = -1; + vnodeFreeFieldsEx(pRuntimeEnv); + + //keep in-memory cache status in local variables in case that it may be changed by write operation + getBasicCacheInfoSnapshot(pQuery, pMeterObj->pCache, pMeterObj->vnode); + + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + if (pCacheInfo == NULL || pCacheInfo->cacheBlocks == NULL || pQuery->numOfBlocks == 0) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return -1; + } + + assert((pQuery->lastKey >= pQuery->skey && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->lastKey <= pQuery->skey && !QUERY_IS_ASC_QUERY(pQuery))); + + if (!ignoreQueryRange && !cacheBoundaryCheck(pQuery, pMeterObj)) { + return -1; + } + + /* find the appropriated slot that contains the requested points */ + TSKEY rawskey = pQuery->skey; + + /* here we actual start to query from pQuery->lastKey */ + pQuery->skey = pQuery->lastKey; + + (*slot) = binarySearchInCacheBlk(pCacheInfo, pQuery, TSDB_KEYSIZE, pQuery->firstSlot, pQuery->currentSlot); + + /* locate the first point of which time stamp is no less than pQuery->skey */ + __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; + + SCacheBlock *pBlock = pCacheInfo->cacheBlocks[*slot]; + (*pos) = searchFn(pBlock->offset[0], pBlock->numOfPoints, pQuery->skey, pQuery->order.order); + + // restore skey before return + pQuery->skey = rawskey; + + // all data are less(greater) than the pQuery->lastKey in case of ascending(descending) query + if (*pos == -1) { + return -1; + } + + int64_t nextKey = getTimestampInCacheBlock(pBlock, *pos); + if ((nextKey < pQuery->lastKey && 
QUERY_IS_ASC_QUERY(pQuery)) || + (nextKey > pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))) { + // all data are less than the pQuery->lastKey(pQuery->sKey) for asc query + return -1; + } + + SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus); + return nextKey; +} + +/** + * check if data in disk. + */ +bool hasDataInDisk(SQuery *pQuery, SMeterObj *pMeterObj) { + SVnodeObj *pVnode = &vnodeList[pMeterObj->vnode]; + if (pVnode->numOfFiles <= 0) { + pQuery->fileId = -1; + return false; + } + + int64_t latestKey = pMeterObj->lastKeyOnFile; + int64_t oldestKey = getOldestKey(pVnode->numOfFiles, pVnode->fileId, &pVnode->cfg); + + TSKEY min, max; + getQueryRange(pQuery, &min, &max); + + /* query range is out of current time interval of table */ + if ((min > latestKey) || (max < oldestKey)) { + pQuery->fileId = -1; + return false; + } + + return true; +} + +bool hasDataInCache(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + + /* no data in cache, return */ + if ((pCacheInfo == NULL) || (pCacheInfo->cacheBlocks == NULL)) { + return false; + } + + /* numOfBlocks value has been overwrite, release pFields data if exists */ + vnodeFreeFieldsEx(pRuntimeEnv); + getBasicCacheInfoSnapshot(pQuery, pCacheInfo, pMeterObj->vnode); + if (pQuery->numOfBlocks <= 0) { + return false; + } + + return cacheBoundaryCheck(pQuery, pMeterObj); +} + +/** + * Get cache snapshot will destroy the comp block info in SQuery, in order to speedup the query + * process, we always check cache first. + */ +void vnodeCheckIfDataExists(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, bool *dataInDisk, bool *dataInCache) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + *dataInCache = hasDataInCache(pRuntimeEnv, pMeterObj); + *dataInDisk = hasDataInDisk(pQuery, pMeterObj); +} + +static void doGetAlignedIntervalQueryRangeImpl(SQuery *pQuery, int64_t qualifiedKey, int64_t keyFirst, int64_t keyLast, + int64_t *skey, int64_t *ekey) { + assert(qualifiedKey >= keyFirst && qualifiedKey <= keyLast); + + if (keyFirst > (INT64_MAX - pQuery->nAggTimeInterval)) { + /* + * if the skey > INT64_MAX - pQuery->nAggTimeInterval, the query duration between + * skey and ekey must be less than one interval.Therefore, no need to adjust the query ranges. + */ + assert(keyLast - keyFirst < pQuery->nAggTimeInterval); + + *skey = keyFirst; + *ekey = keyLast; + return; + } + + *skey = taosGetIntervalStartTimestamp(qualifiedKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit); + int64_t endKey = *skey + pQuery->nAggTimeInterval - 1; + + if (*skey < keyFirst) { + *skey = keyFirst; + } + + if (endKey < keyLast) { + *ekey = endKey; + } else { + *ekey = keyLast; + } +} + +static void doGetAlignedIntervalQueryRange(SQuery *pQuery, TSKEY key, TSKEY skey, TSKEY ekey) { + TSKEY skey1, ekey1; + + TSKEY skey2 = (skey < ekey) ? skey : ekey; + TSKEY ekey2 = (skey < ekey) ? 
ekey : skey; + + doGetAlignedIntervalQueryRangeImpl(pQuery, key, skey2, ekey2, &skey1, &ekey1); + + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->skey = skey1; + pQuery->ekey = ekey1; + assert(pQuery->skey <= pQuery->ekey); + } else { + pQuery->skey = ekey1; + pQuery->ekey = skey1; + assert(pQuery->skey >= pQuery->ekey); + } + + pQuery->lastKey = pQuery->skey; +} + +static void getOneRowFromDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, char **dst, int32_t pos) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + int32_t bytes = pQuery->colList[i].data.bytes; + memcpy(dst[i], pRuntimeEnv->colDataBuffer[i]->data + pos * bytes, bytes); + } +} + +static void getOneRowFromCacheBlock(SQueryRuntimeEnv *pRuntimeEnv, SMeterObj *pMeterObj, SCacheBlock *pBlock, + char **dst, int32_t pos) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + /* + * in case of cache block expired, the pos may exceed the number of points in block, so check + * the range in the first place. + */ + if (pos > pBlock->numOfPoints) { + pos = pBlock->numOfPoints; + } + + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + int32_t colIdx = pQuery->colList[i].colIdx; + int32_t colId = pQuery->colList[i].data.colId; + + SColumn *pCols = &pMeterObj->schema[colIdx]; + + if (colIdx < 0 || colIdx >= pMeterObj->numOfColumns || pCols->colId != colId) { // set null + setNull(dst[i], pCols->type, pCols->bytes); + } else { + memcpy(dst[i], pBlock->offset[colIdx] + pos * pCols->bytes, pCols->bytes); + } + } +} + +static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMeterObj, + SPointInterpoSupporter *pPointInterpSupporter) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + if (!isPointInterpoQuery(pQuery)) { + return false; + } + + /* + * for interpolate point query, points that are directly before/after the specified point are required + */ + if (isFirstLastRowQuery(pQuery)) { + assert(!QUERY_IS_ASC_QUERY(pQuery)); + } else { + assert(QUERY_IS_ASC_QUERY(pQuery)); + } + assert(pPointInterpSupporter != NULL && pQuery->skey == pQuery->ekey); + + SCacheBlock *pBlock = NULL; + + qTrace("QInfo:%p get next data point, fileId:%d, slot:%d, pos:%d", GET_QINFO_ADDR(pQuery), pQuery->fileId, + pQuery->slot, pQuery->pos); + + // save the point that is directly after or equals to the specified point + if (IS_DISK_DATA_BLOCK(pQuery)) { + getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); + } else { + pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; + + while (pBlock == NULL) { + // cache block is flushed to disk, try to find new query position again + getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); + + // new position is located in file, load data and abort + if (IS_DISK_DATA_BLOCK(pQuery)) { + getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pNextPoint, pQuery->pos); + break; + } else { + pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + } + } + + if (!IS_DISK_DATA_BLOCK(pQuery)) { + getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pNextPoint, pQuery->pos); + } + } + + /* + * 1. for last_row query, return immediately. + * 2. the specified timestamp equals to the required key, interpolation according to neighbor points is not necessary + * for interp query. 
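+   * e.g. an interp query at ts = 15 with stored points at ts = 10 and ts = 20 needs both neighbors,
+   * whereas an interp query at ts = 20 can use the stored point directly and no interpolation is done.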
+   */
+  TSKEY actualKey = *(TSKEY *)pPointInterpSupporter->pNextPoint[0];
+  if (isFirstLastRowQuery(pQuery) || actualKey == pQuery->skey) {
+    setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
+
+    /*
+     * the retrieved ts may not be equal to pMeterObj->lastKey due to cache re-allocation;
+     * set pQuery->ekey/pQuery->skey/pQuery->lastKey to the new value.
+     */
+    if (pQuery->ekey != actualKey) {
+      pQuery->skey = actualKey;
+      pQuery->ekey = actualKey;
+      pQuery->lastKey = actualKey;
+      pSupporter->rawSKey = actualKey;
+      pSupporter->rawEKey = actualKey;
+    }
+    return true;
+  }
+
+  /* the qualified point is not the first point in the data block */
+  if (pQuery->pos > 0) {
+    int32_t prevPos = pQuery->pos - 1;
+
+    if (IS_DISK_DATA_BLOCK(pQuery)) {
+      /* save the point that is directly before the specified point */
+      getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, prevPos);
+    } else {
+      getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pPrevPoint, prevPos);
+    }
+  } else {
+    __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm];
+
+    savePointPosition(&pRuntimeEnv->startPos, pQuery->fileId, pQuery->slot, pQuery->pos);
+
+    // backwards movement does not set pQuery->pos correctly; it needs to be set manually later.
+    moveToNextBlock(pRuntimeEnv, QUERY_DESC_FORWARD_STEP, searchFn, true);
+
+    /*
+     * no previous data exists; reset the status and load the data block that contains the qualified point
+     */
+    if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) {
+      dTrace("QInfo:%p no previous data block, start fileId:%d, slot:%d, pos:%d, qrange:%lld-%lld, out of range",
+             GET_QINFO_ADDR(pQuery), pRuntimeEnv->startPos.fileId, pRuntimeEnv->startPos.slot, pRuntimeEnv->startPos.pos,
+             pQuery->skey, pQuery->ekey);
+
+      // no result, return immediately
+      setQueryStatus(pQuery, QUERY_COMPLETED);
+      return false;
+    } else {  // the previous point has been located
+      if (pQuery->fileId >= 0) {
+        pQuery->pos = pQuery->pBlock[pQuery->slot].numOfPoints - 1;
+        getOneRowFromDiskBlock(pRuntimeEnv, pPointInterpSupporter->pPrevPoint, pQuery->pos);
+
+        qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d",
+               GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->slot, pQuery->pos, pQuery->pos);
+      } else {
+        pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot);
+        if (pBlock == NULL) {
+          // todo: nothing to do here, the previous block has been flushed to disk
+        } else {
+          pQuery->pos = pBlock->numOfPoints - 1;
+          getOneRowFromCacheBlock(pRuntimeEnv, pMeterObj, pBlock, pPointInterpSupporter->pPrevPoint, pQuery->pos);
+
+          qTrace("QInfo:%p get prev data point, fileId:%d, slot:%d, pos:%d, pQuery->pos:%d",
+                 GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->slot, pBlock->numOfPoints - 1, pQuery->pos);
+        }
+      }
+    }
+  }
+
+  pQuery->skey = *(TSKEY *)pPointInterpSupporter->pPrevPoint[0];
+  pQuery->ekey = *(TSKEY *)pPointInterpSupporter->pNextPoint[0];
+  pQuery->lastKey = pQuery->skey;
+
+  return true;
+}
+
+static bool doGetQueryPos(TSKEY key, SMeterQuerySupportObj *pSupporter, SPointInterpoSupporter *pPointInterpSupporter) {
+  SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
+  SQuery *          pQuery = pRuntimeEnv->pQuery;
+  SMeterObj *       pMeterObj = pRuntimeEnv->pMeterObj;
+
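+  // a key of -1 means that no qualified data point was located; point-interpolation queries still try to
+  // fetch the neighboring points, while other queries simply abort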
+  /* check whether the key is within the query range; if not, there is no qualified data in the disk file */
+  if (key != -1 && key <= pQuery->ekey) {
+    if (isPointInterpoQuery(pQuery)) { /* no qualified data in this query range */
+      return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
+    } else {
+      getAlignedIntervalQueryRange(pQuery, key, pQuery->skey, pQuery->ekey);
+      return true;
+    }
+  } else {  // key > pQuery->ekey, abort for a normal query, continue for an interp query
+    if (isPointInterpoQuery(pQuery)) {
+      return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
+    } else {
+      return false;
+    }
+  }
+}
+
+/**
+ * determine the first query range according to the raw query range [skey, ekey] and the group-by interval.
+ * the validity of the aggregation time interval is not checked here; the client-side parser guarantees
+ * that it is no less than 10ms.
+ */
+bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySupportObj *pSupporter,
+                               SPointInterpoSupporter *pPointInterpSupporter) {
+  SQueryRuntimeEnv *  pRuntimeEnv = &pSupporter->runtimeEnv;
+  SQuery *            pQuery = pRuntimeEnv->pQuery;
+  SMeterObj *         pMeterObj = pRuntimeEnv->pMeterObj;
+  __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm];
+
+  if (QUERY_IS_ASC_QUERY(pQuery)) {
+    // todo: the action return as the getQueryStartPositionInCache function
+    if (dataInDisk && getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_GREATER_EQUAL, searchFn)) {
+      TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
+      assert(key >= pQuery->skey);
+
+      return doGetQueryPos(key, pSupporter, pPointInterpSupporter);
+    }
+
+    // set no data in file
+    pQuery->fileId = -1;
+    SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache;
+
+    /* for an interpolation query, any point in the cache that is greater than the query range is still required */
+    if (pCacheInfo == NULL || pCacheInfo->cacheBlocks == NULL || pCacheInfo->numOfBlocks == 0 || !dataInCache) {
+      return false;
+    }
+
+    TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false);
+    return doGetQueryPos(nextKey, pSupporter, pPointInterpSupporter);
+
+  } else {  // descending order
+    if (dataInCache) {  // todo handle error
+      TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false);
+      assert(nextKey == -1 || nextKey <= pQuery->skey);
+
+      // valid data in cache
+      if (nextKey != -1) {
+        if (nextKey >= pQuery->ekey) {
+          if (isFirstLastRowQuery(pQuery)) {
+            return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter);
+          } else {
+            getAlignedIntervalQueryRange(pQuery, nextKey, pQuery->skey, pQuery->ekey);
+            return true;
+          }
+        } else {
+          /*
+           * nextKey < pQuery->ekey && nextKey < pQuery->lastKey: the query range is larger than all data, abort.
+           * NOTE: an interp query never reaches here, since the query order of every interp query is ascending.
+           */
+          return false;
+        }
+      } else {  // all data in cache are greater than pQuery->lastKey, try the file
+      }
+    }
+
+    if (dataInDisk && getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn)) {
+      TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos);
+      assert(key <= pQuery->skey);
+
+      /* key in query range. 
If not, no qualified in disk file */ + if (key >= pQuery->ekey) { + if (isFirstLastRowQuery(pQuery)) { /* no qualified data in this query range */ + return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); + } else { + getAlignedIntervalQueryRange(pQuery, key, pQuery->skey, pQuery->ekey); + return true; + } + } else { // Goes on in case of key in file less than pMeterObj->lastKey, + // which is also the pQuery->skey + if (isFirstLastRowQuery(pQuery)) { + return getNeighborPoints(pSupporter, pMeterObj, pPointInterpSupporter); + } + } + } + } + + return false; +} + +// todo handle the mmap relative offset value assert problem +int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv *pRuntimeEnv, SPositionInfo *position) { + TSKEY nextTimestamp = -1; + + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + + pQuery->fileId = position->fileId; + pQuery->slot = position->slot; + pQuery->pos = position->pos; + + if (position->fileId == -1) { + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + if (pCacheInfo == NULL || pCacheInfo->numOfBlocks == 0 || pCacheInfo->cacheBlocks == NULL) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return -1; + } + + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + if (pBlock != NULL) { + nextTimestamp = getTimestampInCacheBlock(pBlock, position->pos); + } else { + // todo fix it + } + + SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus); + } else { + // todo handle the file broken situation + /* + * load the file metadata into buffer first, then the specific data block. + * currently opened file is not the start file, reset to the start file + */ + int32_t fileIdx = vnodeGetVnodeHeaderFileIdx(&pQuery->fileId, pRuntimeEnv, pQuery->order.order); + if (fileIdx < 0) { + dError("QInfo:%p failed to get data file:%d", GET_QINFO_ADDR(pQuery), pQuery->fileId); + // ignore the files on disk + position->fileId = -1; + return -1; + } + + /* + * NOTE: the compblock information may not be loaded yet, here loaded it firstly + * If the compBlock info is loaded, it wont be loaded again. + */ + int32_t numOfBlocks = vnodeGetCompBlockInfo(pMeterObj, pRuntimeEnv, fileIdx); + assert(numOfBlocks > 0); + + nextTimestamp = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + } + + return nextTimestamp; +} + +static void setScanLimitationByResultBuffer(SQuery *pQuery) { + bool multiOutput = false; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + multiOutput = IS_MULTIOUTPUT(aAggs[functionId].nStatus) && (functionId != TSDB_FUNC_TOP) && + (functionId != TSDB_FUNC_BOTTOM) && (functionId != TSDB_FUNC_TOP_DST) && + (functionId != TSDB_FUNC_BOTTOM_DST); + if (multiOutput) { + break; + } + } + + pQuery->checkBufferInLoop = multiOutput ? 1 : 0; + pQuery->pointsOffset = pQuery->pointsToRead; +} + +/* + * todo add more parameters to check soon.. 
+ */ +bool vnodeParametersSafetyCheck(SQuery *pQuery) { + // load data column information is incorrect + for (int32_t i = 0; i < pQuery->numOfCols - 1; ++i) { + if (pQuery->colList[i].data.colId == pQuery->colList[i + 1].data.colId) { + dError("QInfo:%p invalid data load column for query", GET_QINFO_ADDR(pQuery)); + return false; + } + } + return true; +} + +static int file_order_comparator(const void *p1, const void *p2) { + SQueryFileInfo *pInfo1 = (SQueryFileInfo *)p1; + SQueryFileInfo *pInfo2 = (SQueryFileInfo *)p2; + + if (pInfo1->fileID == pInfo2->fileID) { + return 0; + } + + return (pInfo1->fileID > pInfo2->fileID) ? 1 : -1; +} + +/** + * open a data files and header file for metric meta query + * @param pQInfo + * @param pVnodeFiles + * @param fid + * @param vnodeId + * @param fileName + * @param prefix + * @return + */ +static int32_t vnodeOpenVnodeDBFiles(SQInfo *pQInfo, SQueryFileInfo *pVnodeFiles, int32_t fid, int32_t vnodeId, + char *fileName, char *prefix) { + __off_t size = 0; + + pVnodeFiles->fileID = fid; + pVnodeFiles->defaultMappingSize = DEFAULT_DATA_FILE_MMAP_WINDOW_SIZE; + + snprintf(pVnodeFiles->headerFilePath, 256, "%s%s", prefix, fileName); + +#if 1 + pVnodeFiles->headerFd = open(pVnodeFiles->headerFilePath, O_RDONLY); +#else + int32_t *val = (int32_t *)taosGetStrHashData(fileHandleHashList, pVnodeFiles->headerFilePath); + if (val == NULL) { + pVnodeFiles->headerFd = open(pVnodeFiles->headerFilePath, O_RDONLY); + taosAddStrHash(fileHandleHashList, pVnodeFiles->headerFilePath, (char *)&pVnodeFiles->headerFd); + } else { + pVnodeFiles->headerFd = *val; + } +#endif + + if (!VALIDFD(pVnodeFiles->headerFd)) { + dError("QInfo:%p failed open header file:%s reason:%s", pQInfo, pVnodeFiles->headerFilePath, strerror(errno)); + goto _clean; + } + + struct stat fstat; + if (stat(pVnodeFiles->headerFilePath, &fstat) < 0) return -1; + pVnodeFiles->headFileSize = fstat.st_size; + size = fstat.st_size; + + pVnodeFiles->pHeaderFileData = mmap(NULL, size, PROT_READ, MAP_SHARED, pVnodeFiles->headerFd, 0); + if (pVnodeFiles->pHeaderFileData == MAP_FAILED) { + dError("QInfo:%p failed to map header file:%s, %s", pQInfo, pVnodeFiles->headerFilePath, strerror(errno)); + goto _clean; + } + + if (madvise(pVnodeFiles->pHeaderFileData, size, MADV_SEQUENTIAL) == -1) { + /* even the advise failed, continue.. 
*/ + dError("QInfo:%p failed to advise kernel the usage of header files, reason:%s", pQInfo, strerror(errno)); + } + + snprintf(pVnodeFiles->dataFilePath, 256, "%sv%df%d.data", prefix, vnodeId, fid); + snprintf(pVnodeFiles->lastFilePath, 256, "%sv%df%d.last", prefix, vnodeId, fid); + +#if 1 + pVnodeFiles->dataFd = open(pVnodeFiles->dataFilePath, O_RDONLY); + pVnodeFiles->lastFd = open(pVnodeFiles->lastFilePath, O_RDONLY); +#else + val = (int32_t *)taosGetStrHashData(fileHandleHashList, pVnodeFiles->dataFilePath); + if (val == NULL) { + pVnodeFiles->dataFd = open(pVnodeFiles->dataFilePath, O_RDONLY); + taosAddStrHash(fileHandleHashList, pVnodeFiles->dataFilePath, (char *)&pVnodeFiles->dataFd); + } else { + pVnodeFiles->dataFd = *val; + } +#endif + + if (!VALIDFD(pVnodeFiles->dataFd)) { + dError("QInfo:%p failed to open data file:%s, reason:%s", pQInfo, pVnodeFiles->dataFilePath, strerror(errno)); + goto _clean; + } + + if (!VALIDFD(pVnodeFiles->lastFd)) { + dError("QInfo:%p failed to open last file:%s, reason:%s", pQInfo, pVnodeFiles->lastFilePath, strerror(errno)); + goto _clean; + } + +#if 0 + size = lseek(pVnodeFiles->dataFd, 0, SEEK_END); + pVnodeFiles->dataFileSize = size; + pVnodeFiles->dtFileMappingOffset = 0; +#else + if (stat(pVnodeFiles->dataFilePath, &fstat) < 0) return -1; + pVnodeFiles->dataFileSize = fstat.st_size; + + if (stat(pVnodeFiles->lastFilePath, &fstat) < 0) return -1; + pVnodeFiles->lastFileSize = fstat.st_size; +#endif + + /* enforce kernel to preload data when the file is mapping */ + pVnodeFiles->pDataFileData = mmap(NULL, pVnodeFiles->defaultMappingSize, PROT_READ, MAP_PRIVATE | MAP_POPULATE, + pVnodeFiles->dataFd, pVnodeFiles->dtFileMappingOffset); + if (pVnodeFiles->pDataFileData == MAP_FAILED) { + dError("QInfo:%p failed to map data file:%s, %s", pQInfo, pVnodeFiles->dataFilePath, strerror(errno)); + goto _clean; + } + + /* advise kernel the usage of mmaped data */ + if (madvise(pVnodeFiles->pDataFileData, pVnodeFiles->defaultMappingSize, MADV_SEQUENTIAL) == -1) { + dError("QInfo:%p failed to advise kernel the usage of data file:%s, reason:%s", pQInfo, pVnodeFiles->dataFilePath, + strerror(errno)); + } + + return 0; + +_clean: + if (pVnodeFiles->pHeaderFileData != MAP_FAILED && pVnodeFiles->pDataFileData != NULL) { + munmap(pVnodeFiles->pHeaderFileData, pVnodeFiles->headFileSize); + pVnodeFiles->pHeaderFileData = NULL; + } + + if (pVnodeFiles->pDataFileData != MAP_FAILED && pVnodeFiles->pDataFileData != NULL) { + munmap(pVnodeFiles->pDataFileData, pVnodeFiles->defaultMappingSize); + pVnodeFiles->pDataFileData = NULL; + } + + tclose(pVnodeFiles->headerFd); + tclose(pVnodeFiles->dataFd); + tclose(pVnodeFiles->lastFd); + return -1; +} + +static void vnodeGetFilesSnapshot(SQInfo *pQInfo, int32_t vnodeId) { + char dbFilePathPrefix[TSDB_FILENAME_LEN] = {0}; + + sprintf(dbFilePathPrefix, "%s/vnode%d/db/", tsDirectory, vnodeId); + DIR *pDir = opendir(dbFilePathPrefix); + if (pDir == NULL) { + dError("QInfo:%p failed to open directory:%s", pQInfo, dbFilePathPrefix); + return; + } + + char suffix[] = ".head"; + + struct dirent *pEntry = NULL; + int32_t alloc = 4; // default allocated size + + SQueryRuntimeEnv *pRuntimeEnv = &(pQInfo->pMeterQuerySupporter->runtimeEnv); + pRuntimeEnv->pHeaderFiles = calloc(1, sizeof(SQueryFileInfo) * alloc); + SVnodeObj *pVnode = &vnodeList[vnodeId]; + + while ((pEntry = readdir(pDir)) != NULL) { + if ((pEntry->d_name[0] == '.' 
&& pEntry->d_name[1] == '\0') || (strcmp(pEntry->d_name, "..") == 0)) { + continue; + } + + if (pEntry->d_type & DT_DIR) { + continue; + } + + size_t len = strlen(pEntry->d_name); + if (strcasecmp(&pEntry->d_name[len - 5], suffix) != 0) { + continue; + } + + int32_t vid = 0; + int32_t fid = 0; + sscanf(pEntry->d_name, "v%df%d", &vid, &fid); + if (vid != vnodeId) { /* ignore error files */ + dError("QInfo:%p error data file:%s in vid:%d, ignore", pQInfo, pEntry->d_name, vnodeId); + continue; + } + + if (fid > pVnode->fileId || (fid < pVnode->fileId - pVnode->numOfFiles + 1)) { + dError("QInfo:%p error data file:%s in vid:%d, ignore", pQInfo, pEntry->d_name, vnodeId); + continue; + } + + assert(fid >= 0 && vid >= 0); + + if (++pRuntimeEnv->numOfFiles > alloc) { + alloc = alloc << 1; + pRuntimeEnv->pHeaderFiles = realloc(pRuntimeEnv->pHeaderFiles, alloc * sizeof(SQueryFileInfo)); + memset(&pRuntimeEnv->pHeaderFiles[alloc >> 1], 0, (alloc >> 1) * sizeof(SQueryFileInfo)); + } + + SQueryFileInfo *pVnodeFiles = &pRuntimeEnv->pHeaderFiles[pRuntimeEnv->numOfFiles - 1]; + int32_t ret = vnodeOpenVnodeDBFiles(pQInfo, pVnodeFiles, fid, vnodeId, pEntry->d_name, dbFilePathPrefix); + if (ret < 0) { + memset(pVnodeFiles, 0, sizeof(SQueryFileInfo)); // reset information + pRuntimeEnv->numOfFiles -= 1; + } + } + + closedir(pDir); + + dTrace("QInfo:%p find %d data files in %s to be checked", pQInfo, pRuntimeEnv->numOfFiles, dbFilePathPrefix); + + /* order the files information according their names */ + qsort(pRuntimeEnv->pHeaderFiles, (size_t)pRuntimeEnv->numOfFiles, sizeof(SQueryFileInfo), file_order_comparator); +} + +static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, void *pBlock) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->pos += pQuery->limit.offset; + } else { + pQuery->pos -= pQuery->limit.offset; + } + + if (IS_DISK_DATA_BLOCK(pQuery)) { + pQuery->skey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + } else { + pQuery->skey = getTimestampInCacheBlock(pBlock, pQuery->pos); + } + + pQuery->lastKey = pQuery->skey; + pQuery->limit.offset = 0; +} + +// todo ignore the avg/sum/min/max/count/stddev/top/bottom functions, of which +// the scan order is not matter +static bool onlyOneQueryType(SQuery *pQuery, int32_t functId, int32_t functIdDst) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) { + continue; + } + if (functionId != functId && functionId != functIdDst) { + return false; + } + } + + return true; +} + +static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_FIRST, TSDB_FUNC_FIRST_DST); } + +static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } + +static void rewriteExecOrder(SQuery *pQuery, bool metricQuery) { + // in case of point-interpolation query, use asc order scan + char msg[] = "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%lld-%lld, " + "new qrange:%lld-%lld"; + + // descending order query + if (isFirstLastRowQuery(pQuery)) { + dTrace("QInfo:%p scan order changed for last_row query, old:%d, new:%d", GET_QINFO_ADDR(pQuery), + pQuery->order.order, TSQL_SO_DESC); + + pQuery->order.order = TSQL_SO_DESC; + return; + } + + if (isPointInterpoQuery(pQuery) && pQuery->nAggTimeInterval == 0) { + if (!QUERY_IS_ASC_QUERY(pQuery)) { + 
dTrace(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSQL_SO_ASC, pQuery->skey, pQuery->ekey, + pQuery->ekey, pQuery->skey); + SWAP(pQuery->skey, pQuery->ekey); + } + + pQuery->order.order = TSQL_SO_ASC; + return; + } + + if (pQuery->nAggTimeInterval == 0) { + if (onlyFirstQuery(pQuery)) { + if (!QUERY_IS_ASC_QUERY(pQuery)) { + dTrace(msg, GET_QINFO_ADDR(pQuery), "only-first", pQuery->order.order, TSQL_SO_ASC, pQuery->skey, pQuery->ekey, + pQuery->ekey, pQuery->skey); + + SWAP(pQuery->skey, pQuery->ekey); + } + + pQuery->order.order = TSQL_SO_ASC; + } else if (onlyLastQuery(pQuery)) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + dTrace(msg, GET_QINFO_ADDR(pQuery), "only-last", pQuery->order.order, TSQL_SO_DESC, pQuery->skey, pQuery->ekey, + pQuery->ekey, pQuery->skey); + + SWAP(pQuery->skey, pQuery->ekey); + } + + pQuery->order.order = TSQL_SO_DESC; + } + + } else { // interval query + if (metricQuery) { + if (onlyFirstQuery(pQuery)) { + if (!QUERY_IS_ASC_QUERY(pQuery)) { + dTrace(msg, GET_QINFO_ADDR(pQuery), "only-first stable", pQuery->order.order, TSQL_SO_ASC, pQuery->skey, + pQuery->ekey, pQuery->ekey, pQuery->skey); + + SWAP(pQuery->skey, pQuery->ekey); + } + + pQuery->order.order = TSQL_SO_ASC; + } else if (onlyLastQuery(pQuery)) { + if (QUERY_IS_ASC_QUERY(pQuery)) { + dTrace(msg, GET_QINFO_ADDR(pQuery), "only-last stable", pQuery->order.order, TSQL_SO_DESC, pQuery->skey, + pQuery->ekey, pQuery->ekey, pQuery->skey); + + SWAP(pQuery->skey, pQuery->ekey); + } + + pQuery->order.order = TSQL_SO_DESC; + } + } + } +} + +static int32_t doSkipDataBlock(SQueryRuntimeEnv *pRuntimeEnv) { + SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; + SQuery * pQuery = pRuntimeEnv->pQuery; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; + + while (1) { + int32_t ret = moveToNextBlock(pRuntimeEnv, step, searchFn, false); + UNUSED(ret); + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) { + break; + } + + void *pBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot); + + int32_t blockType = IS_DISK_DATA_BLOCK(pQuery) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK; + SBlockInfo blockInfo = getBlockBasicInfo(pBlock, blockType); + + int32_t maxReads = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.size - pQuery->pos : pQuery->pos + 1; + + if (pQuery->limit.offset < maxReads) { // start position in current block + updateOffsetVal(pRuntimeEnv, pBlock); + break; + } else { + pQuery->limit.offset -= maxReads; + pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.keyLast : blockInfo.keyFirst; + pQuery->lastKey += step; + } + } + + return 0; +} + +void forwardQueryStartPosition(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + + if (pQuery->limit.offset <= 0) { + return; + } + + void * pBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot); + int32_t blockType = (IS_DISK_DATA_BLOCK(pQuery)) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK; + + SBlockInfo blockInfo = getBlockBasicInfo(pBlock, blockType); + int32_t maxReads = (QUERY_IS_ASC_QUERY(pQuery)) ? 
blockInfo.size - pQuery->pos : pQuery->pos + 1; + + if (pQuery->limit.offset < maxReads) { // start position in current block + updateOffsetVal(pRuntimeEnv, pBlock); + } else { + pQuery->limit.offset -= maxReads; + doSkipDataBlock(pRuntimeEnv); + } +} + +static bool forwardQueryStartPosIfNeeded(SQInfo *pQInfo, SMeterQuerySupportObj *pSupporter, bool dataInDisk, + bool dataInCache) { + SQuery *pQuery = &pQInfo->query; + + /* if queried with value filter, do NOT forward query start position */ + if (pQuery->numOfFilterCols > 0) { + return true; + } + + if (pQuery->limit.offset > 0 && (!isTopBottomQuery(pQuery)) && pQuery->interpoType == TSDB_INTERPO_NONE) { + /* + * 1. for top/bottom query, the offset applies to the final result, not here + * 2. for interval without interpolation query we forward pQuery->nAggTimeInterval at a time for + * pQuery->limit.offset times. Since hole exists, pQuery->nAggTimeInterval*pQuery->limit.offset value is + * not valid. otherwise, we only forward pQuery->limit.offset number of points + */ + if (pQuery->nAggTimeInterval > 0) { + while (1) { + /* + * the skey may not be the aligned start time + * 1. it is the value of first existed data point, therefore, the range + * between skey and ekey may be less than the interval value. + * 2. the ekey may not be the actual end value of time interval, in case of the + */ + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->skey = pQuery->ekey + 1; + } else { + pQuery->skey = pQuery->ekey - 1; + } + + // boundary check + if ((pQuery->skey > pSupporter->rawEKey && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->skey < pSupporter->rawEKey && !QUERY_IS_ASC_QUERY(pQuery))) { + setQueryStatus(pQuery, QUERY_COMPLETED); + + sem_post(&pQInfo->dataReady); // hack for next read for empty return + pQInfo->over = 1; + return false; + } + + /* + * NOTE: the end key must be set the last value, to cover all possible data. Otherwise, it may + * contain no data with only one interval time range + */ + pQuery->ekey = pSupporter->rawEKey; + pQuery->lastKey = pQuery->skey; + + // todo opt performance + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, NULL) == false) { + sem_post(&pQInfo->dataReady); // hack for next read for empty return + pQInfo->over = 1; + return false; + } + + if (--pQuery->limit.offset == 0) { + break; + } + } + } else { + forwardQueryStartPosition(&pSupporter->runtimeEnv); + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) { + setQueryStatus(pQuery, QUERY_COMPLETED); + + sem_post(&pQInfo->dataReady); // hack for next read for empty return + pQInfo->over = 1; + return false; + } + } + } + + return true; +} + +/** + * param[0]: tags. 
reserved for tags
+ * param[1]: default value/previous value of the specified timestamp
+ * param[2]: next value of the specified timestamp
+ * param[3]: denotes whether the result is an exact value or an interpolated result
+ *
+ * intermediate[0]: interpolation type
+ * intermediate[1]: the originally specified timestamp; the pCtx->startTimetamp is
+ * changed during the query to satisfy the query procedure
+ * intermediate[2]: flag that denotes whether it is the primary timestamp column or not
+ *
+ * todo refactor
+ * @param pQInfo
+ * @param pSupporter
+ * @param pInterpoRaw
+ */
+void pointInterpSupporterSetData(SQInfo *pQInfo, SPointInterpoSupporter *pPointInterpSupport) {
+  /* not a point interpolation query, abort */
+  if (!isPointInterpoQuery(&pQInfo->query)) {
+    return;
+  }
+
+  SQuery *               pQuery = &pQInfo->query;
+  SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter;
+  SQueryRuntimeEnv *     pRuntimeEnv = &pSupporter->runtimeEnv;
+
+  int32_t count = 1;
+  TSKEY   key = *(TSKEY *)pPointInterpSupport->pNextPoint[0];
+
+  if (key == pSupporter->rawSKey) {
+    /* the queried timestamp has an exact value, return it directly without interpolation */
+    for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+      tVariantCreateB(&pRuntimeEnv->pCtx[i].param[3], (char *)&count, sizeof(count), TSDB_DATA_TYPE_INT);
+
+      pRuntimeEnv->pCtx[i].intermediateBuf[1].i64Key = key;
+      pRuntimeEnv->pCtx[i].intermediateBuf[1].nType = TSDB_DATA_TYPE_BIGINT;
+    }
+  } else {
+    /* set the points directly before and after the specified timestamp for processing */
+    count = 2;
+
+    if (pQuery->interpoType == TSDB_INTERPO_SET_VALUE) {
+      for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+        SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
+
+        /* for the primary timestamp column, set the flag */
+        if (pQuery->pSelectExpr[i].pBase.colInfo.colId == 0) {
+          pCtx->intermediateBuf[2].i64Key = 1;
+          pCtx->intermediateBuf[2].nType = TSDB_DATA_TYPE_BIGINT;
+        }
+
+        tVariantCreateB(&pCtx->param[3], (char *)&count, sizeof(count), TSDB_DATA_TYPE_INT);
+        tVariantCreateB(&pCtx->param[1], (char *)&pQuery->defaultVal[i], pCtx->inputBytes, pCtx->inputType);
+        pCtx->intermediateBuf[1].i64Key = pSupporter->rawSKey;
+        pCtx->intermediateBuf[1].nType = TSDB_DATA_TYPE_BIGINT;
+
+        /* the interpolation type is set in intermediateBuf[0] */
+        tVariantCreateB(&pCtx->intermediateBuf[0], (char *)&pQuery->interpoType, sizeof(pQuery->interpoType),
+                        TSDB_DATA_TYPE_SMALLINT);
+      }
+    } else {
+      TSKEY prevKey = *(TSKEY *)pPointInterpSupport->pPrevPoint[0];
+      TSKEY nextKey = *(TSKEY *)pPointInterpSupport->pNextPoint[0];
+
+      for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+        int32_t colInBuf = pQuery->pSelectExpr[i].pBase.colInfo.colIdxInBuf;
+        int32_t type = GET_COLUMN_TYPE(pQuery, i);
+
+        char    tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
+        int32_t len = 0;
+
+        /* for the primary timestamp column, set the flag */
+        if (pQuery->pSelectExpr[i].pBase.colInfo.colId == 0) {
+          pRuntimeEnv->pCtx[i].intermediateBuf[2].i64Key = 1;
+          pRuntimeEnv->pCtx[i].intermediateBuf[2].nType = TSDB_DATA_TYPE_BIGINT;
+        } else {
+          if ((type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_BIGINT) || type == TSDB_DATA_TYPE_TIMESTAMP) {
+            len = sprintf(tmp, "%ld,%ld", prevKey, *(int64_t *)pPointInterpSupport->pPrevPoint[colInBuf]);
+            tVariantCreateB(&pRuntimeEnv->pCtx[i].param[1], tmp, len, TSDB_DATA_TYPE_BINARY);
+
+            len = sprintf(tmp, "%ld,%ld", nextKey, *(int64_t *)pPointInterpSupport->pNextPoint[colInBuf]);
+            tVariantCreateB(&pRuntimeEnv->pCtx[i].param[2], tmp, len, TSDB_DATA_TYPE_BINARY);
+          } else if (type == TSDB_DATA_TYPE_FLOAT) {
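+            // encode each neighboring value as a "timestamp,value" string; a NULL neighbor is passed as the
+            // NULL string literal instead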
+            if (isNull(pPointInterpSupport->pPrevPoint[colInBuf], type)) {
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[1], TSDB_DATA_NULL_STR_L, strlen(TSDB_DATA_NULL_STR_L),
+                              TSDB_DATA_TYPE_BINARY);
+            } else {
+              len = sprintf(tmp, "%ld,%.9f", prevKey, *(float *)pPointInterpSupport->pPrevPoint[colInBuf]);
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[1], tmp, len, TSDB_DATA_TYPE_BINARY);
+            }
+
+            if (isNull(pPointInterpSupport->pNextPoint[colInBuf], type)) {
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[2], TSDB_DATA_NULL_STR_L, strlen(TSDB_DATA_NULL_STR_L),
+                              TSDB_DATA_TYPE_BINARY);
+            } else {
+              len = sprintf(tmp, "%ld,%.9f", nextKey, *(float *)pPointInterpSupport->pNextPoint[colInBuf]);
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[2], tmp, len, TSDB_DATA_TYPE_BINARY);
+            }
+          } else if (type == TSDB_DATA_TYPE_DOUBLE) {
+            if (isNull(pPointInterpSupport->pPrevPoint[colInBuf], type)) {
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[1], TSDB_DATA_NULL_STR_L, strlen(TSDB_DATA_NULL_STR_L),
+                              TSDB_DATA_TYPE_BINARY);
+            } else {
+              len = sprintf(tmp, "%ld,%.9f", prevKey, *(double *)pPointInterpSupport->pPrevPoint[colInBuf]);
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[1], tmp, len, TSDB_DATA_TYPE_BINARY);
+            }
+
+            if (isNull(pPointInterpSupport->pNextPoint[colInBuf], type)) {
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[2], TSDB_DATA_NULL_STR_L, strlen(TSDB_DATA_NULL_STR_L),
+                              TSDB_DATA_TYPE_BINARY);
+            } else {
+              len = sprintf(tmp, "%ld,%.9f", nextKey, *(double *)pPointInterpSupport->pNextPoint[colInBuf]);
+              tVariantCreateB(&pRuntimeEnv->pCtx[i].param[2], tmp, len, TSDB_DATA_TYPE_BINARY);
+            }
+          } else if (type == TSDB_DATA_TYPE_BINARY) {
+            len = sprintf(tmp, "%ld,%s", prevKey, pPointInterpSupport->pPrevPoint[colInBuf]);
+            tVariantCreateB(&pRuntimeEnv->pCtx[i].param[1], tmp, len, TSDB_DATA_TYPE_BINARY);
+
+            len = sprintf(tmp, "%ld,%s", nextKey, pPointInterpSupport->pNextPoint[colInBuf]);
+            tVariantCreateB(&pRuntimeEnv->pCtx[i].param[2], tmp, len, TSDB_DATA_TYPE_BINARY);
+          } else if (type == TSDB_DATA_TYPE_NCHAR) {
+            int32_t maxLen = TSDB_MAX_BYTES_PER_ROW / TSDB_NCHAR_SIZE;
+            len = swprintf((wchar_t *)tmp, maxLen, L"%ld,%ls", prevKey, pPointInterpSupport->pPrevPoint[colInBuf]);
+            tVariantCreateB(&pRuntimeEnv->pCtx[i].param[1], tmp, len * TSDB_NCHAR_SIZE, TSDB_DATA_TYPE_NCHAR);
+
+            len = swprintf((wchar_t *)tmp, maxLen, L"%ld,%ls", nextKey, pPointInterpSupport->pNextPoint[colInBuf]);
+            tVariantCreateB(&pRuntimeEnv->pCtx[i].param[2], tmp, len * TSDB_NCHAR_SIZE, TSDB_DATA_TYPE_NCHAR);
+          }
+        }
+
+        tVariantCreateB(&pRuntimeEnv->pCtx[i].param[3], (char *)&count, sizeof(count), TSDB_DATA_TYPE_INT);
+
+        /* the interpolation type is set in intermediateBuf[0] */
+        tVariantCreateB(&pRuntimeEnv->pCtx[i].intermediateBuf[0], (char *)&pQuery->interpoType,
+                        sizeof(pQuery->interpoType), TSDB_DATA_TYPE_SMALLINT);
+        pRuntimeEnv->pCtx[i].intermediateBuf[1].i64Key = pSupporter->rawSKey;
+        pRuntimeEnv->pCtx[i].intermediateBuf[1].nType = TSDB_DATA_TYPE_BIGINT;
+      }
+    }
+  }
+}
+
+void pointInterpSupporterInit(SQuery *pQuery, SPointInterpoSupporter *pInterpoSupport) {
+  if (isPointInterpoQuery(pQuery)) {
+    pInterpoSupport->pPrevPoint = malloc(pQuery->numOfCols * sizeof(void *));
+    pInterpoSupport->pNextPoint = malloc(pQuery->numOfCols * sizeof(void *));
+
+    pInterpoSupport->numOfCols = pQuery->numOfCols;
+
+    /* get the appropriate size for one row of source data */
+    int32_t len = 0;
+    for (int32_t i = 0; i < pQuery->numOfCols; ++i) {
+      len += pQuery->colList[i].data.bytes;
+    }
+
+    assert(PRIMARY_TSCOL_LOADED(pQuery));
+
+    void *prev = 
calloc(1, len); + void *next = calloc(1, len); + + int32_t offset = 0; + + for (int32_t i = 0, j = 0; i < pQuery->numOfCols; ++i, ++j) { + pInterpoSupport->pPrevPoint[j] = prev + offset; + pInterpoSupport->pNextPoint[j] = next + offset; + + offset += pQuery->colList[i].data.bytes; + } + } +} + +void pointInterpSupporterDestroy(SPointInterpoSupporter *pPointInterpSupport) { + if (pPointInterpSupport->numOfCols <= 0 || pPointInterpSupport->pPrevPoint == NULL) { + return; + } + + tfree(pPointInterpSupport->pPrevPoint[0]); + tfree(pPointInterpSupport->pNextPoint[0]); + + tfree(pPointInterpSupport->pPrevPoint); + tfree(pPointInterpSupport->pNextPoint); + + pPointInterpSupport->numOfCols = 0; +} + +static void allocMemForInterpo(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, SMeterObj *pMeterObj) { + if (pQuery->interpoType != TSDB_INTERPO_NONE) { + assert(pQuery->nAggTimeInterval > 0 || (pQuery->nAggTimeInterval == 0 && isPointInterpoQuery(pQuery))); + + if (pQuery->nAggTimeInterval > 0) { + pSupporter->runtimeEnv.pInterpoBuf = malloc(POINTER_BYTES * pQuery->numOfOutputCols); + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + pSupporter->runtimeEnv.pInterpoBuf[i] = + calloc(1, sizeof(tFilePage) + pQuery->pSelectExpr[i].resBytes * pMeterObj->pointsPerFileBlock); + } + } + } +} + +int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMeterQuerySupportObj *pSupporter) { + SQuery *pQuery = &pQInfo->query; + + if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) || + (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) { + + dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey, + pQuery->order.order); + + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + return TSDB_CODE_SUCCESS; + } + + setScanLimitationByResultBuffer(pQuery); + rewriteExecOrder(pQuery, false); + + pQInfo->over = 0; + pQInfo->pointsRead = 0; + pQuery->pointsRead = 0; + + // dataInCache requires lastKey value + pQuery->lastKey = pQuery->skey; + + vnodeInitDataBlockInfo(&pSupporter->runtimeEnv.loadBlockInfo); + vnodeInitLoadCompBlockInfo(&pSupporter->runtimeEnv.loadCompBlockInfo); + + // check data in file or cache + bool dataInCache = true; + bool dataInDisk = true; + pSupporter->runtimeEnv.pQuery = pQuery; + + vnodeCheckIfDataExists(&pSupporter->runtimeEnv, pMeterObj, &dataInDisk, &dataInCache); + + /* data in file or cache is not qualified for the query. 
abort */ + if (!(dataInCache || dataInDisk)) { + dTrace("QInfo:%p no result in query", pQInfo); + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + return TSDB_CODE_SUCCESS; + } + + /* create runtime environment */ + int32_t ret = setupQueryRuntimeEnv(pMeterObj, pQuery, &pSupporter->runtimeEnv, NULL, pQuery->order.order); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + vnodeGetFilesSnapshot(pQInfo, pMeterObj->vnode); + + // in case of last_row query, we set the query timestamp to pMeterObj->lastKey; + if (isFirstLastRowQuery(pQuery)) { + pQuery->skey = pMeterObj->lastKey; + pQuery->ekey = pMeterObj->lastKey; + pQuery->lastKey = pQuery->skey; + } + + pSupporter->rawSKey = pQuery->skey; + pSupporter->rawEKey = pQuery->ekey; + + /* query on single table */ + pSupporter->numOfMeters = 1; + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + + SPointInterpoSupporter interpInfo = {0}; + pointInterpSupporterInit(pQuery, &interpInfo); + + if ((normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &interpInfo) == false) || + (isFixedOutputQuery(pQuery) && !isTopBottomQuery(pQuery) && (pQuery->limit.offset > 0)) || + (isTopBottomQuery(pQuery) && pQuery->limit.offset >= pQuery->pSelectExpr[1].pBase.arg[0].argValue.i64)) { + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + pointInterpSupporterDestroy(&interpInfo); + return TSDB_CODE_SUCCESS; + } + + /* + * here we set the value for before and after the specified time into the + * parameter for + * interpolation query + */ + pointInterpSupporterSetData(pQInfo, &interpInfo); + pointInterpSupporterDestroy(&interpInfo); + + if (!forwardQueryStartPosIfNeeded(pQInfo, pSupporter, dataInDisk, dataInCache)) { + return TSDB_CODE_SUCCESS; + } + + int64_t rs = taosGetIntervalStartTimestamp(pSupporter->rawSKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit); + taosInitInterpoInfo(&pSupporter->runtimeEnv.interpoInfo, pQuery->order.order, rs, 0, 0); + allocMemForInterpo(pSupporter, pQuery, pMeterObj); + + if (!isPointInterpoQuery(pQuery)) { + assert(pQuery->pos >= 0 && pQuery->slot >= 0); + } + + // the pQuery->skey is changed during normalizedFirstQueryRange, so set the newest lastkey value + pQuery->lastKey = pQuery->skey; + return TSDB_CODE_SUCCESS; +} + +void vnodeQueryFreeQInfoEx(SQInfo *pQInfo) { + if (pQInfo == NULL || pQInfo->pMeterQuerySupporter == NULL) { + return; + } + + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + teardownQueryRuntimeEnv(&pSupporter->runtimeEnv); + tfree(pSupporter->pMeterSidExtInfo); + + if (pSupporter->pMeterObj != NULL) { + taosCleanUpIntHash(pSupporter->pMeterObj); + pSupporter->pMeterObj = NULL; + } + + if (pSupporter->pSidSet != NULL) { + for (int32_t i = 0; i < pSupporter->pSidSet->numOfSubSet; ++i) { + destroyBuf(pSupporter->pResult[i].result, pQInfo->query.numOfOutputCols); + } + } + + if (VALIDFD(pSupporter->meterOutputFd)) { + assert(pSupporter->meterOutputMMapBuf != NULL); + dTrace("QInfo:%p disk-based output buffer during query:%lld bytes", pQInfo, pSupporter->bufSize); + munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); + tclose(pSupporter->meterOutputFd); + + unlink(pSupporter->extBufFile); + } + + tSidSetDestroy(&pSupporter->pSidSet); + + if (pSupporter->pMeterDataInfo != NULL) { + for (int32_t j = 0; j < pSupporter->numOfMeters; ++j) { + destroyMeterQueryInfo(pSupporter->pMeterDataInfo[j].pMeterQInfo); + free(pSupporter->pMeterDataInfo[j].pBlock); + } + } + + tfree(pSupporter->pMeterDataInfo); + + tfree(pSupporter->pResult); + 
tfree(pQInfo->pMeterQuerySupporter); +} + +int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery) { + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) || + (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) { + + dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey, + pQuery->order.order); + + sem_post(&pQInfo->dataReady); + pQInfo->over = 1; + + return TSDB_CODE_SUCCESS; + } + + pQInfo->over = 0; + pQInfo->pointsRead = 0; + pQuery->pointsRead = 0; + + rewriteExecOrder(pQuery, true); + + vnodeInitDataBlockInfo(&pSupporter->runtimeEnv.loadBlockInfo); + vnodeInitLoadCompBlockInfo(&pSupporter->runtimeEnv.loadCompBlockInfo); + + /* + * since we employ the output control mechanism in main loop. + * so, disable it during data block scan procedure. + */ + setScanLimitationByResultBuffer(pQuery); + + // save raw query range for applying to each subgroup + pSupporter->rawEKey = pQuery->ekey; + pSupporter->rawSKey = pQuery->skey; + pQuery->lastKey = pQuery->skey; + + /* create runtime environment */ + SSchema *pTagSchema = NULL; + + tTagSchema *pTagSchemaInfo = pSupporter->pSidSet->pTagSchema; + if (pTagSchemaInfo != NULL) { + pTagSchema = pTagSchemaInfo->pSchema; + } + + /* get one queried meter */ + SMeterObj *pMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[0]->sid); + + int32_t ret = setupQueryRuntimeEnv(pMeter, pQuery, &pSupporter->runtimeEnv, pTagSchema, TSQL_SO_ASC); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + tSidSetSort(pSupporter->pSidSet); + + vnodeGetFilesSnapshot(pQInfo, pMeter->vnode); + pSupporter->pResult = calloc(1, sizeof(SOutputRes) * pSupporter->pSidSet->numOfSubSet); + if (pSupporter->pResult == NULL) { + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + /* create group result buffer */ + for (int32_t k = 0; k < pSupporter->pSidSet->numOfSubSet; ++k) { + SOutputRes *pOneRes = &pSupporter->pResult[k]; + pOneRes->nAlloc = 1; + pOneRes->result = createInMemGroupResultBuf(pSupporter->runtimeEnv.pCtx, pQuery->numOfOutputCols, pOneRes->nAlloc); + } + + if (pQuery->nAggTimeInterval != 0) { + getExtTmpfilePath("/tb_metric_mmap_%lld_%lld_%d_%d", pthread_self(), 0, 0, pSupporter->extBufFile); + pSupporter->meterOutputFd = open(pSupporter->extBufFile, O_CREAT | O_RDWR, 0666); + + if (!VALIDFD(pSupporter->meterOutputFd)) { + dError("QInfo:%p failed to create file: %s on disk. %s", pQInfo, pSupporter->extBufFile, strerror(errno)); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + + // set 4k page for each meter + pSupporter->numOfPages = pSupporter->numOfMeters; + + ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE); + pSupporter->runtimeEnv.numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / pQuery->rowSize; + pSupporter->lastPageId = -1; + pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE; + + pSupporter->meterOutputMMapBuf = + mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pSupporter->meterOutputFd, 0); + if (pSupporter->meterOutputMMapBuf == MAP_FAILED) { + dError("QInfo:%p failed to map data file: %s to disk. 
%s", pQInfo, pSupporter->extBufFile, strerror(errno)); + return TSDB_CODE_SERV_OUT_OF_MEMORY; + } + } + + /* metric query do not invoke interpolation, it will be done at the + * second-stage merge */ + if (!isPointInterpoQuery(pQuery)) { + pQuery->interpoType = TSDB_INTERPO_NONE; + } + + TSKEY revisedStime = + taosGetIntervalStartTimestamp(pSupporter->rawSKey, pQuery->nAggTimeInterval, pQuery->intervalTimeUnit); + taosInitInterpoInfo(&pSupporter->runtimeEnv.interpoInfo, pQuery->order.order, revisedStime, 0, 0); + + return TSDB_CODE_SUCCESS; +} + +/** + * decrease the refcount for each table involved in this query + * @param pQInfo + */ +void vnodeDecMeterRefcnt(SQInfo *pQInfo) { + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + if (pSupporter == NULL || pSupporter->numOfMeters == 1) { + __sync_fetch_and_sub(&pQInfo->pObj->numOfQueries, 1); + dTrace("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pQInfo->pObj->vnode, + pQInfo->pObj->sid, pQInfo->pObj->meterId, pQInfo->pObj->numOfQueries); + } else { + int32_t num = 0; + for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { + SMeterObj *pMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[i]->sid); + __sync_fetch_and_sub(&(pMeter->numOfQueries), 1); + + if (pMeter->numOfQueries > 0) { + dTrace("QInfo:%p vid:%d sid:%d meterId:%s, query is over, numOfQueries:%d", pQInfo, pMeter->vnode, pMeter->sid, + pMeter->meterId, pMeter->numOfQueries); + num++; + } + } + + /* + * in order to reduce log output, for all meters of which numOfQueries count are 0, + * we do not output corresponding information + */ + num = pSupporter->numOfMeters - num; + dTrace("QInfo:%p metric query is over, dec query ref for %d meters, numOfQueries on %d meters are 0", + pQInfo, pSupporter->numOfMeters, num); + } +} + +// todo merge with doRevisedResultsByLimit +void UNUSED_FUNC truncateResultByLimit(SQInfo *pQInfo, int64_t * final, int32_t *interpo) { + SQuery *pQuery = &(pQInfo->query); + + if (pQuery->limit.limit > 0 && ((* final) + pQInfo->pointsRead > pQuery->limit.limit)) { + int64_t num = (* final) + pQInfo->pointsRead - pQuery->limit.limit; + (*interpo) -= num; + (* final) -= num; + + setQueryStatus(pQuery, QUERY_COMPLETED); // query completed + } +} + +TSKEY getTimestampInCacheBlock(SCacheBlock *pBlock, int32_t index) { + if (pBlock == NULL || index >= pBlock->numOfPoints || index < 0) { + return -1; + } + + TSKEY *ts = (TSKEY *)pBlock->offset[0]; + return ts[index]; +} + +/* + * NOTE: pQuery->pos will not change, the corresponding data block will be loaded into buffer + * loadDataBlockOnDemand will change the value of pQuery->pos, according to the pQuery->lastKey + * */ +TSKEY getTimestampInDiskBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t index) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + /* + * the corresponding compblock info has been loaded already + * todo add check for compblock loaded + */ + SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); + + // this block must be loaded into buffer + SQueryLoadBlockInfo *pLoadInfo = &pRuntimeEnv->loadBlockInfo; + assert(pQuery->pos >= 0 && pQuery->pos < pBlock->numOfPoints); + + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + + bool loadTimestamp = true; + int32_t fileId = pQuery->fileId; + int32_t fileIndex = vnodeGetVnodeHeaderFileIdx(&fileId, pRuntimeEnv, pQuery->order.order); + + if (!vnodeIsDatablockLoaded(pRuntimeEnv, pMeterObj, fileIndex)) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, fileId:%d, slot:%d load data block due to primary 
key required", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot); + + // todo handle failed to load data, file corrupted + // todo refactor the return value + int32_t ret = + loadDataBlockIntoMem(pBlock, &pQuery->pFields[pQuery->slot], pRuntimeEnv, fileIndex, loadTimestamp, true); + UNUSED(ret); + } + + SET_DATA_BLOCK_LOADED(pRuntimeEnv->blockStatus); + SET_FILE_BLOCK_FLAG(pRuntimeEnv->blockStatus); + + assert(pQuery->fileId == pLoadInfo->fileId && pQuery->slot == pLoadInfo->slotIdx); + return ((TSKEY *)pRuntimeEnv->primaryColBuffer->data)[index]; +} + +// todo remove this function +static void getFirstDataBlockInCache(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + assert(pQuery->fileId == -1 && QUERY_IS_ASC_QUERY(pQuery)); + + /* + * get the start position in cache according to the pQuery->lastkey + * + * In case of cache and disk file data overlaps and all required data are commit to disk file, + * there are no qualified data available in cache, we need to set the QUERY_COMPLETED flag. + * + * If cache data and disk-based data are not completely overlapped, cacheBoundaryCheck function will set the + * correct status flag. + */ + TSKEY nextTimestamp = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, true); + if (nextTimestamp < 0) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + } else if (nextTimestamp > pQuery->ekey) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } +} + +void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_search_fn_t searchFn) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + dTrace("QInfo:%p vid:%d sid:%d id:%s cache block re-allocated to other meter, " + "try get query start position in file/cache, qrange:%lld-%lld, lastKey:%lld", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); + + if (step == QUERY_DESC_FORWARD_STEP) { + /* + * In descending order query, if the cache is invalid, it must be flushed to disk. + * Try to find the appropriate position in file, and no need to search cache any more. + */ + bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); + + dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position, fileId:%d, slot:%d, pos:%d", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot, pQuery->pos); + + if (ret) { + TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + + // key in query range. If not, no qualified in disk file + if (key < pQuery->ekey) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } + } else { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + } + } else { + bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_GREATER_EQUAL, searchFn); + if (ret) { + dTrace("QInfo:%p vid:%d sid:%d id:%s find the possible position, fileId:%d, slot:%d, pos:%d", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot, pQuery->pos); + + TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + + // key in query range. If not, no qualified in disk file + if (key > pQuery->ekey) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } + } else { + /* + * all data in file is less than the pQuery->lastKey, try cache. 
+ * cache block status will be set in getFirstDataBlockInCache function + */ + getFirstDataBlockInCache(pRuntimeEnv); + + dTrace("QInfo:%p vid:%d sid:%d id:%s find the new position in cache, fileId:%d, slot:%d, pos:%d", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, pQuery->slot, pQuery->pos); + } + } +} + +static int32_t moveToNextBlockInCache(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + + assert(pQuery->fileId < 0); + + /* + * ascending order to last cache block all data block in cache have been iterated, no need to set + * pRuntimeEnv->nextPos. done + */ + if (step == QUERY_ASC_FORWARD_STEP && pQuery->slot == pQuery->currentSlot) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return DISK_DATA_LOADED; + } + + /* + * descending order to first cache block, try file + * NOTE: use the real time cache information, not the snapshot + */ + int32_t numOfBlocks = pCacheInfo->numOfBlocks; + int32_t currentSlot = pCacheInfo->currentSlot; + + int32_t firstSlot = getFirstCacheSlot(numOfBlocks, currentSlot, pCacheInfo); + if (step == QUERY_DESC_FORWARD_STEP && pQuery->slot == firstSlot) { + bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); + if (ret) { + TSKEY key = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); + + // key in query range. If not, no qualified in disk file + if (key < pQuery->ekey) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } + + assert(pRuntimeEnv->startPos.fileId < 0); + + } else { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + } + return DISK_DATA_LOADED; + } + + /* now still iterate the cache data blocks */ + pQuery->slot = (pQuery->slot + step + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + + /* + * data in this cache block has been flushed to disk, then we should locate the start position in file. + * In both desc/asc query, this situation may occur. And we need to locate the start query position in file or cache. + */ + if (pBlock == NULL) { + getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); + + return DISK_DATA_LOADED; + } else { + pQuery->pos = (QUERY_IS_ASC_QUERY(pQuery)) ? 0 : pBlock->numOfPoints - 1; + + TSKEY startkey = getTimestampInCacheBlock(pBlock, pQuery->pos); + if (startkey < 0) { + setQueryStatus(pQuery, QUERY_COMPLETED); + } + + SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus); + + dTrace("QInfo:%p check cache block, blockId:%d slot:%d pos:%d, blockstatus:%d", GET_QINFO_ADDR(pQuery), + pQuery->blockId, pQuery->slot, pQuery->pos, pRuntimeEnv->blockStatus); + } + + return DISK_DATA_LOADED; +} + +/** + * move the cursor to next block and not load + */ +static int32_t moveToNextBlock(SQueryRuntimeEnv *pRuntimeEnv, int32_t step, __block_search_fn_t searchFn, + bool loadData) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + + SET_DATA_BLOCK_NOT_LOADED(pRuntimeEnv->blockStatus); + + if (pQuery->fileId >= 0) { + int32_t fileIndex = -1; + + /* + * 1. ascending order. The last data block of data file + * 2. descending order. 
The first block of file + */ + if ((step == QUERY_ASC_FORWARD_STEP && (pQuery->slot == pQuery->numOfBlocks - 1)) || + (step == QUERY_DESC_FORWARD_STEP && (pQuery->slot == 0))) { + fileIndex = getNextDataFileCompInfo(pRuntimeEnv, pMeterObj, step); + /* data maybe in cache */ + if (fileIndex < 0) { + assert(pQuery->fileId == -1); + if (step == QUERY_ASC_FORWARD_STEP) { + getFirstDataBlockInCache(pRuntimeEnv); + } else { /* no data any more */ + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + } + + return DISK_DATA_LOADED; + } else { + pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; + pQuery->slot = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->numOfBlocks - 1; + } + } else { // next block in the same file + int32_t fid = pQuery->fileId; + fileIndex = vnodeGetVnodeHeaderFileIdx(&fid, pRuntimeEnv, pQuery->order.order); + pQuery->slot += step; + pQuery->pos = (step == QUERY_ASC_FORWARD_STEP) ? 0 : pQuery->pBlock[pQuery->slot].numOfPoints - 1; + } + + assert(pQuery->pBlock != NULL); + + /* no need to load data, return directly */ + if (!loadData) { + return DISK_DATA_LOADED; + } + + int32_t ret = + LoadDatablockOnDemand(&pQuery->pBlock[pQuery->slot], &pQuery->pFields[pQuery->slot], &pRuntimeEnv->blockStatus, + pRuntimeEnv, fileIndex, pQuery->slot, searchFn, true); + + if (ret != DISK_DATA_LOADED) { + /* + * if it is the last block of file, set current access position at the last point of the meter in this file, + * in order to get the correct next access point, + */ + return ret; + } + } else { // data in cache + return moveToNextBlockInCache(pRuntimeEnv, step, searchFn); + } + + return DISK_DATA_LOADED; +} + +static void doHandleFileBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, + SData **sdata, int32_t *numOfRes, int32_t blockLoadStatus, int32_t *forwardStep) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + + int64_t start = taosGetTimestampUs(); + + SCompBlock *pBlock = getDiskDataBlock(pQuery, pQuery->slot); + *pblockInfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK); + + TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; + + if (blockLoadStatus == DISK_DATA_LOADED) { + *forwardStep = applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, (char *)sdata, + pQuery->pFields[pQuery->slot], searchFn, numOfRes); + } else { + *forwardStep = pblockInfo->size; + } + + pSummary->fileTimeUs += (taosGetTimestampUs() - start); +} + +static void doHandleCacheBlockImpl(SQueryRuntimeEnv *pRuntimeEnv, SBlockInfo *pblockInfo, __block_search_fn_t searchFn, + int32_t *numOfRes, int32_t *forwardStep) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + + int64_t start = taosGetTimestampUs(); + + // todo refactor getCacheDataBlock. 
+ //#ifdef _CACHE_INVALID_TEST + // taosMsleep(20000); + //#endif + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + while (pBlock == NULL) { + getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); + + if (IS_DISK_DATA_BLOCK(pQuery)) { // do check data block in file + break; + } else { + pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + } + } + + if (IS_DISK_DATA_BLOCK(pQuery)) { + // start query position is located in file, try query on file block + doHandleFileBlockImpl(pRuntimeEnv, pblockInfo, searchFn, pRuntimeEnv->colDataBuffer, numOfRes, DISK_DATA_LOADED, + forwardStep); + } else { // also query in cache block + *pblockInfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK); + + TSKEY *primaryKeys = (TSKEY *)pBlock->offset[0]; + *forwardStep = + applyFunctionsOnBlock(pRuntimeEnv, pblockInfo, primaryKeys, (char *)pBlock, NULL, searchFn, numOfRes); + + pSummary->cacheTimeUs += (taosGetTimestampUs() - start); + } +} + +static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + bool LOAD_DATA = true; + + int32_t forwardStep = 0; + int64_t cnt = 0; + + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + SData ** sdata = pRuntimeEnv->colDataBuffer; + + __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; + int32_t blockLoadStatus = DISK_DATA_LOADED; + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + // initial data block always be loaded + SPositionInfo *pStartPos = &pRuntimeEnv->startPos; + assert(pQuery->slot == pStartPos->slot); + + dTrace("QInfo:%p query start, qrange:%lld-%lld, lastkey:%lld, order:%d, start fileId:%d, slot:%d, pos:%d, bstatus:%d", + GET_QINFO_ADDR(pQuery), pQuery->skey, pQuery->ekey, pQuery->lastKey, pQuery->order.order, pStartPos->fileId, + pStartPos->slot, pStartPos->pos, pRuntimeEnv->blockStatus); + + while (1) { + // check if query is killed or not set the status of query to pass the status check + if (isQueryKilled(pQuery)) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return cnt; + } + + int32_t numOfRes = 0; + SBlockInfo blockInfo = {0}; + + if (IS_DISK_DATA_BLOCK(pQuery)) { + doHandleFileBlockImpl(pRuntimeEnv, &blockInfo, searchFn, sdata, &numOfRes, blockLoadStatus, &forwardStep); + } else { + doHandleCacheBlockImpl(pRuntimeEnv, &blockInfo, searchFn, &numOfRes, &forwardStep); + } + + dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d, rows:%d, checked:%d", + GET_QINFO_ADDR(pQuery), blockInfo.keyFirst, blockInfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos, + pRuntimeEnv->blockStatus, blockInfo.size, forwardStep); + + // save last access position + int32_t accessPos = pQuery->pos + (forwardStep - 1) * step; + savePointPosition(&pRuntimeEnv->endPos, pQuery->fileId, pQuery->slot, accessPos); + + cnt += forwardStep; + + if (queryCompleteInBlock(pQuery, &blockInfo, forwardStep)) { + int32_t nextPos = accessPos + step; + + /* + * set the next access position, nextPos only required by + * 1. interval query. + * 2. multi-output query that may cause buffer overflow. 
+ */ + if (pQuery->nAggTimeInterval > 0 || + (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL) && pQuery->checkBufferInLoop == 1)) { + // if (IS_DISK_DATA_BLOCK(pQuery)) { + // getNextQueryStartPos(&pRuntimeEnv->nextPos, + // pQuery->pBlock, pQuery->slot, accessPos, + // pRuntimeEnv); + // } else { + // getNextQueryStartPosInCache(&pRuntimeEnv->nextPos, + // pData, pQuery->slot, accessPos, pRuntimeEnv); + // } + + if (nextPos >= blockInfo.size || nextPos < 0) { + moveToNextBlock(pRuntimeEnv, step, searchFn, !LOAD_DATA); + + // slot/pos/fileId is updated in moveToNextBlock function + savePointPosition(&pRuntimeEnv->nextPos, pQuery->fileId, pQuery->slot, pQuery->pos); + } else { + savePointPosition(&pRuntimeEnv->nextPos, pQuery->fileId, pQuery->slot, accessPos + step); + } + } + break; + } else { // query not completed, move to next block + int64_t start = taosGetTimestampUs(); + + blockLoadStatus = moveToNextBlock(pRuntimeEnv, step, searchFn, LOAD_DATA); + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) { + savePointPosition(&pRuntimeEnv->nextPos, pQuery->fileId, pQuery->slot, pQuery->pos); + setQueryStatus(pQuery, QUERY_COMPLETED); + break; + } + + int64_t delta = (taosGetTimestampUs() - start); + if (IS_DISK_DATA_BLOCK(pQuery)) { + pSummary->fileTimeUs += delta; + } else { + pSummary->cacheTimeUs += delta; + } + } + + // check next block + void *pNextBlock = getGenericDataBlock(pMeterObj, pQuery, pQuery->slot); + + int32_t blockType = (IS_DISK_DATA_BLOCK(pQuery)) ? BLK_FILE_BLOCK : BLK_CACHE_BLOCK; + blockInfo = getBlockBasicInfo(pNextBlock, blockType); + if (!checkQueryRangeAgainstNextBlock(&blockInfo, pRuntimeEnv)) { + break; + } + } // while(1) + + return cnt; +} + +static void updatelastkey(SQuery *pQuery, SMeterQueryInfo *pMeterQInfo) { pMeterQInfo->lastKey = pQuery->lastKey; } + +void queryOnBlock(SMeterQuerySupportObj *pSupporter, int64_t *primaryKeys, int32_t blockStatus, char *data, + SBlockInfo *pBlockBasicInfo, SMeterDataInfo *pDataHeadInfoEx, SField *pFields, + __block_search_fn_t searchFn) { + /* cache blocks may be assign to other meter, abort */ + if (pBlockBasicInfo->size <= 0) { + return; + } + + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + if (pQuery->nAggTimeInterval == 0) { // not interval query + int32_t numOfRes = 0; + applyFunctionsOnBlock(pRuntimeEnv, pBlockBasicInfo, primaryKeys, data, pFields, searchFn, &numOfRes); + + // note: only fixed number of output for each group by operation + if (numOfRes > 0) { + pSupporter->pResult[pDataHeadInfoEx->groupIdx].numOfRows = numOfRes; + } + + /* used to decide the correct start position in cache after check all data in files */ + updatelastkey(pQuery, pDataHeadInfoEx->pMeterQInfo); + } else { + applyIntervalQueryOnBlock(pSupporter, pDataHeadInfoEx, data, primaryKeys, pBlockBasicInfo, blockStatus, pFields, + searchFn); + } +} + +/* + * set tag value in SQLFunctionCtx + * e.g.,tag information into input buffer + */ +static void doSetTagValueInParam(tTagSchema *pTagSchema, int32_t tagColIdx, SMeterSidExtInfo *pMeterSidInfo, + tVariant *param) { + assert(tagColIdx >= 0); + + int32_t *fieldValueOffset = pTagSchema->colOffset; + + void * pStr = (char *)pMeterSidInfo->tags + fieldValueOffset[tagColIdx]; + SSchema *pCol = &pTagSchema->pSchema[tagColIdx]; + + tVariantDestroy(param); + tVariantCreateB(param, pStr, pCol->bytes, pCol->type); +} + +void vnodeSetTagValueInParam(tSidSet *pSidSet, SQueryRuntimeEnv *pRuntimeEnv, SMeterSidExtInfo *pMeterInfo) 
{ + SQuery * pQuery = pRuntimeEnv->pQuery; + tTagSchema *pTagSchema = pSidSet->pTagSchema; + + // set tag value, by which the results are aggregated. + for (int32_t idx = 0; idx < pQuery->numOfOutputCols; ++idx) { + SColIndexEx *pColEx = &pQuery->pSelectExpr[idx].pBase.colInfo; + if (!pColEx->isTag) { + continue; + } + + doSetTagValueInParam(pTagSchema, pColEx->colIdx, pMeterInfo, &pRuntimeEnv->pCtx[idx].intermediateBuf[3]); + } +} + +static void doMerge(SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, tFilePage *inputSrc, + int32_t inputIdx, int16_t *offset, int32_t maxRow, bool mergeFlag) { + SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t funcId = pQuery->pSelectExpr[i].pBase.functionId; + if (!mergeFlag) { + pCtx[i].aOutputBuf = pCtx[i].aOutputBuf + pCtx[i].outputBytes; + aAggs[funcId].init(&pCtx[i]); + } + pCtx[i].hasNullValue = true; + pCtx[i].nStartQueryTimestamp = timestamp; + pCtx[i].aInputElemBuf = inputSrc->data + (offset[i] * maxRow) + pCtx[i].outputBytes * inputIdx; + + aAggs[funcId].distMergeFunc(&pCtx[i]); + } +} + +static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) { + if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || + functionId == TSDB_FUNC_LAST_DST) { + switch (srcDataType) { + case TSDB_DATA_TYPE_BINARY: + printf("%ld,%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); + break; + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_BOOL: + printf("%ld,%d\t", *(TSKEY *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1)); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf("%ld,%d\t", *(TSKEY *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1)); + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + printf("%ld,%ld\t", *(TSKEY *)data, *(TSKEY *)(data + TSDB_KEYSIZE + 1)); + break; + case TSDB_DATA_TYPE_INT: + printf("%ld,%d\t", *(TSKEY *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1)); + break; + case TSDB_DATA_TYPE_FLOAT: + printf("%ld,%f\t", *(TSKEY *)data, *(float *)(data + TSDB_KEYSIZE + 1)); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf("%ld,%lf\t", *(TSKEY *)data, *(double *)(data + TSDB_KEYSIZE + 1)); + break; + } + } else if (functionId == TSDB_FUNC_AVG_DST) { + printf("%lf,%d\t", *(double *)data, *(int32_t *)(data + sizeof(double))); + } else if (functionId == TSDB_FUNC_SPREAD_DST) { + printf("%lf,%lf\t", *(double *)data, *(double *)(data + sizeof(double))); + } else if (functionId == TSDB_FUNC_WAVG_DST) { + data += 1; + printf("%lf,%ld,%ld,%ld\t", *(double *)data, *(int64_t *)(data + 8), *(int64_t *)(data + 16), + *(int64_t *)(data + 24)); + } else if (functionId == TSDB_FUNC_MIN_DST || functionId == TSDB_FUNC_MAX_DST) { + switch (srcDataType) { + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_BOOL: + printf("%d\t", *(int8_t *)data); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf("%d\t", *(int16_t *)data); + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + printf("%ld\t", *(int64_t *)data); + break; + case TSDB_DATA_TYPE_INT: + printf("%d\t", *(int *)data); + break; + case TSDB_DATA_TYPE_FLOAT: + printf("%f\t", *(float *)data); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf("%f\t", *(float *)data); + break; + } + } else if (functionId == TSDB_FUNC_SUM_DST) { + if (srcDataType == TSDB_DATA_TYPE_FLOAT || srcDataType == TSDB_DATA_TYPE_DOUBLE) { + printf("%lf\t", *(float *)data); + } else { + printf("%ld\t", *(int64_t *)data); + } + } else { + printf("%s\t", 
data); + } +} + +void UNUSED_FUNC displayInterResult(SData **pdata, SQuery *pQuery, int32_t numOfRows) { + int32_t numOfCols = pQuery->numOfOutputCols; + printf("metric query intern-result, total:%d\n", numOfRows); + + SQInfo * pQInfo = (SQInfo *)(GET_QINFO_ADDR(pQuery)); + SMeterObj *pMeterObj = pQInfo->pObj; + + for (int32_t j = 0; j < numOfRows; ++j) { + for (int32_t i = 0; i < numOfCols; ++i) { + switch (pQuery->pSelectExpr[i].resType) { + case TSDB_DATA_TYPE_BINARY: { + int32_t colIdx = pQuery->pSelectExpr[i].pBase.colInfo.colIdx; + int32_t type = 0; + + if (pQuery->pSelectExpr[i].pBase.colInfo.isTag) { + type = pQuery->pSelectExpr[i].resType; + } else { + type = pMeterObj->schema[colIdx].type; + } + printBinaryData(pQuery->pSelectExpr[i].pBase.functionId, pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j, + type); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + printf("%ld\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); + break; + case TSDB_DATA_TYPE_INT: + printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); + break; + case TSDB_DATA_TYPE_FLOAT: + printf("%f\t", *(float *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf("%lf\t", *(double *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); + break; + } + } + printf("\n"); + } +} + +static tFilePage *getFilePage(SMeterQuerySupportObj *pSupporter, int32_t pageId) { + assert(pageId <= pSupporter->lastPageId && pageId >= 0); + return (tFilePage *)(pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * pageId); +} + +static tFilePage *getMeterDataPage(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, int32_t pageId) { + SMeterQueryInfo *pInfo = pInfoEx->pMeterQInfo; + if (pageId >= pInfo->numOfPages) { + return NULL; + } + + int32_t realId = pInfo->pageList[pageId]; + return getFilePage(pSupporter, realId); +} + +typedef struct Position { + int32_t pageIdx; + int32_t rowIdx; +} Position; + +typedef struct SCompSupporter { + SMeterDataInfo ** pInfoEx; + Position * pPosition; + SMeterQuerySupportObj *pSupporter; +} SCompSupporter; + +int64_t getCurrentTimestamp(SCompSupporter *pSupportor, int32_t meterIdx) { + Position * pPos = &pSupportor->pPosition[meterIdx]; + tFilePage *pPage = getMeterDataPage(pSupportor->pSupporter, pSupportor->pInfoEx[meterIdx], pPos->pageIdx); + return *(int64_t *)(pPage->data + TSDB_KEYSIZE * pPos->rowIdx); +} + +int32_t meterResultComparator(const void *pLeft, const void *pRight, void *param) { + int32_t left = *(int32_t *)pLeft; + int32_t right = *(int32_t *)pRight; + + SCompSupporter *supportor = (SCompSupporter *)param; + + Position leftPos = supportor->pPosition[left]; + Position rightPos = supportor->pPosition[right]; + + /* left source is exhausted */ + if (leftPos.pageIdx == -1 && leftPos.rowIdx == -1) { + return 1; + } + + /* right source is exhausted*/ + if (rightPos.pageIdx == -1 && rightPos.rowIdx == -1) { + return -1; + } + + tFilePage *pPageLeft = getMeterDataPage(supportor->pSupporter, supportor->pInfoEx[left], leftPos.pageIdx); + int64_t leftTimestamp = *(int64_t *)(pPageLeft->data + TSDB_KEYSIZE * leftPos.rowIdx); + + tFilePage *pPageRight = getMeterDataPage(supportor->pSupporter, supportor->pInfoEx[right], rightPos.pageIdx); + int64_t rightTimestamp = *(int64_t *)(pPageRight->data + TSDB_KEYSIZE * rightPos.rowIdx); + + if (leftTimestamp == rightTimestamp) { + return 0; + } + + return leftTimestamp > rightTimestamp ? 
1 : -1; +} + +int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj *pSupporter) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + int64_t st = taosGetTimestampMs(); + + while (pSupporter->subgroupIdx < pSupporter->pSidSet->numOfSubSet) { + int32_t start = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx]; + int32_t end = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx + 1]; + + int32_t ret = + doMergeMetersResultsToGroupRes(pSupporter, pQuery, pRuntimeEnv, pSupporter->pMeterDataInfo, start, end); + pSupporter->subgroupIdx += 1; + + /* this group generates at least one result, return results */ + if (ret > 0) { + break; + } + + assert(pSupporter->numOfGroupResultPages == 0); + dTrace("QInfo:%p no result in group %d, continue", GET_QINFO_ADDR(pQuery), pSupporter->subgroupIdx - 1); + } + + dTrace("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%lldms", + GET_QINFO_ADDR(pQuery), pSupporter->subgroupIdx - 1, pSupporter->pSidSet->numOfSubSet, taosGetTimestampMs() - st); + + return pSupporter->numOfGroupResultPages; +} + +void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery) { + if (pSupporter->offset == pSupporter->numOfGroupResultPages) { + pSupporter->numOfGroupResultPages = 0; + + // current results of group has been sent to client, try next group + mergeMetersResultToOneGroups(pSupporter); + + // set current query completed + if (pSupporter->numOfGroupResultPages == 0 && pSupporter->subgroupIdx == pSupporter->pSidSet->numOfSubSet) { + pSupporter->meterIdx = pSupporter->pSidSet->numOfSids; + return; + } + } + + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + char * pStart = pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * (pSupporter->lastPageId + 1) + + pSupporter->groupResultSize * pSupporter->offset; + + uint64_t numOfElem = ((tFilePage *)pStart)->numOfElems; + assert(numOfElem <= pQuery->pointsToRead); + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memcpy(pQuery->sdata[i], pStart, pRuntimeEnv->pCtx[i].outputBytes * numOfElem + sizeof(tFilePage)); + pStart += pRuntimeEnv->pCtx[i].outputBytes * pQuery->pointsToRead + sizeof(tFilePage); + } + + pQuery->pointsRead += numOfElem; + pSupporter->offset += 1; +} + +int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, SQueryRuntimeEnv *pRuntimeEnv, + SMeterDataInfo *pMeterHeadDataInfo, int32_t start, int32_t end) { + if (pSupporter->groupResultSize == 0) { + /* calculate the maximum required space */ + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + pSupporter->groupResultSize += sizeof(tFilePage) + pQuery->pointsToRead * pRuntimeEnv->pCtx[i].outputBytes; + } + } + + tFilePage ** buffer = (tFilePage **)pQuery->sdata; + Position * posArray = calloc(1, sizeof(Position) * (end - start)); + SMeterDataInfo **pValidMeter = malloc(POINTER_BYTES * (end - start)); + + int32_t numOfMeters = 0; + for (int32_t i = start; i < end; ++i) { + if (pMeterHeadDataInfo[i].pMeterQInfo->numOfPages > 0 && pMeterHeadDataInfo[i].pMeterQInfo->numOfRes > 0) { + pValidMeter[numOfMeters] = &pMeterHeadDataInfo[i]; + // set the merge start position: page:0, index:0 + posArray[numOfMeters].pageIdx = 0; + posArray[numOfMeters++].rowIdx = 0; + } + } + + if (numOfMeters == 0) { + tfree(posArray); + tfree(pValidMeter); + assert(pSupporter->numOfGroupResultPages == 0); + return 0; + } + + SCompSupporter cs = {pValidMeter, posArray, pSupporter}; // 1 == ascending order + 
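+  // merge the per-meter result pages in timestamp order with a loser tree; meterResultComparator
+  // supplies the ordering, and exhausted sources (pageIdx/rowIdx == -1) always sort last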
SLoserTreeInfo *pTree = NULL; + + tLoserTreeCreate(&pTree, numOfMeters, &cs, meterResultComparator); + + SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + resetMergeResultBuf(pQuery, pCtx); + + int64_t lastTimestamp = -1; + + int64_t startt = taosGetTimestampMs(); + + while (1) { + int32_t pos = pTree->pNode[0].index; + Position * position = &cs.pPosition[pos]; + tFilePage *pPage = getMeterDataPage(cs.pSupporter, pValidMeter[pos], position->pageIdx); + + int64_t ts = getCurrentTimestamp(&cs, pos); + if (ts == lastTimestamp) { // merge with the last one + doMerge(pQuery, pRuntimeEnv, ts, pPage, position->rowIdx, pRuntimeEnv->offset, pRuntimeEnv->numOfRowsPerPage, + true); + } else { + // copy data to disk buffer + if (buffer[0]->numOfElems == pQuery->pointsToRead) { + flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv); + resetMergeResultBuf(pQuery, pCtx); + } + + pPage = getMeterDataPage(cs.pSupporter, pValidMeter[pos], position->pageIdx); + if (pPage->numOfElems <= 0) { // current source data page is empty + // do nothing + } else { + doMerge(pQuery, pRuntimeEnv, ts, pPage, position->rowIdx, pRuntimeEnv->offset, pRuntimeEnv->numOfRowsPerPage, + false); + buffer[0]->numOfElems += 1; + } + } + + lastTimestamp = ts; + + if (cs.pPosition[pos].rowIdx >= pPage->numOfElems - 1) { + cs.pPosition[pos].rowIdx = 0; + cs.pPosition[pos].pageIdx += 1; // try next page + + /*check if current page is empty or not. if it is empty, ignore it and try + * next*/ + if (cs.pPosition[pos].pageIdx <= cs.pInfoEx[pos]->pMeterQInfo->numOfPages - 1) { + tFilePage *newPage = getMeterDataPage(cs.pSupporter, pValidMeter[pos], position->pageIdx); + if (newPage->numOfElems <= 0) { + /* if current source data page is null, it must be the last page of + * source output page */ + cs.pPosition[pos].pageIdx += 1; + assert(cs.pPosition[pos].pageIdx >= cs.pInfoEx[pos]->pMeterQInfo->numOfPages - 1); + } + } + + /* the following code must be executed if current source pages are + * exhausted */ + if (cs.pPosition[pos].pageIdx >= cs.pInfoEx[pos]->pMeterQInfo->numOfPages) { + cs.pPosition[pos].pageIdx = -1; + cs.pPosition[pos].rowIdx = -1; + + /* all input sources are exhausted */ + if (--numOfMeters == 0) { + break; + } + } + } else { + cs.pPosition[pos].rowIdx += 1; + } + + tLoserTreeAdjust(pTree, pos + pTree->numOfEntries); + } + + if (buffer[0]->numOfElems != 0) { // there are data in buffer + flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv); + } + + int64_t endt = taosGetTimestampMs(); + +#ifdef _DEBUG_VIEW + displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); +#endif + + dTrace("QInfo:%p result merge completed, elapsed time:%lld ms", GET_QINFO_ADDR(pQuery), endt - startt); + tfree(pTree); + tfree(pValidMeter); + tfree(posArray); + + pSupporter->offset = 0; + + return pSupporter->numOfGroupResultPages; +} + +static void extendDiskBuf(SMeterQuerySupportObj *pSupporter, int32_t numOfPages) { + assert(pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE == pSupporter->bufSize); + + munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); + pSupporter->numOfPages = numOfPages; + + // disk-based output buffer is exhausted, try to extend the disk-based buffer + int32_t ret = ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE); + if (ret != 0) { + perror("error in allocate the disk-based buffer"); + return; + } + + pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE; + pSupporter->meterOutputMMapBuf = + mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, 
pSupporter->meterOutputFd, 0); +} + +void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, const SQueryRuntimeEnv *pRuntimeEnv) { + int32_t numOfMeterResultBufPages = pSupporter->lastPageId + 1; + int64_t dstSize = numOfMeterResultBufPages * DEFAULT_INTERN_BUF_SIZE + + pSupporter->groupResultSize * (pSupporter->numOfGroupResultPages + 1); + + int32_t requiredPages = pSupporter->numOfPages; + if (requiredPages * DEFAULT_INTERN_BUF_SIZE < dstSize) { + while (requiredPages * DEFAULT_INTERN_BUF_SIZE < dstSize) { + requiredPages += pSupporter->numOfMeters; + } + + extendDiskBuf(pSupporter, requiredPages); + } + + char *lastPosition = pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * numOfMeterResultBufPages + + pSupporter->groupResultSize * pSupporter->numOfGroupResultPages; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t size = pRuntimeEnv->pCtx[i].outputBytes * pQuery->sdata[0]->len + sizeof(tFilePage); + memcpy(lastPosition, pQuery->sdata[i], size); + + lastPosition += pRuntimeEnv->pCtx[i].outputBytes * pQuery->pointsToRead + sizeof(tFilePage); + } + + pSupporter->numOfGroupResultPages += 1; +} + +void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx) { + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + pCtx[k].aOutputBuf = pQuery->sdata[k]->data - pCtx[k].outputBytes; + pCtx[k].size = 1; + pCtx[k].startOffset = 0; + pCtx[k].numOfIteratedElems = 0; + pQuery->sdata[k]->len = 0; + } +} + +void setMeterDataInfo(SMeterDataInfo *pMeterDataInfo, SMeterObj *pMeterObj, int32_t meterIdx, int32_t groupId) { + pMeterDataInfo->pMeterObj = pMeterObj; + pMeterDataInfo->groupIdx = groupId; + pMeterDataInfo->meterOrderIdx = meterIdx; +} + +void doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + /* for interval query, close all unclosed results */ + if (pQuery->nAggTimeInterval > 0) { + SMeterDataInfo *pMeterInfo = pSupporter->pMeterDataInfo; + for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { + if (pMeterInfo[i].pMeterQInfo != NULL && pMeterInfo[i].pMeterQInfo->lastResRows > 0) { + int32_t index = pMeterInfo[i].meterOrderIdx; + + pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[index]->sid); + assert(pRuntimeEnv->pMeterObj == pMeterInfo[i].pMeterObj); + + setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo); + saveResult(pSupporter, pMeterInfo[i].pMeterQInfo, pMeterInfo[i].pMeterQInfo->lastResRows); + } + } + } +} + +void disableFunctForSuppleScanAndSetSortOrder(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + pRuntimeEnv->pCtx[i].order = (pRuntimeEnv->pCtx[i].order) ^ 1; + int32_t functId = pQuery->pSelectExpr[i].pBase.functionId; + + if (((functId == TSDB_FUNC_FIRST || functId == TSDB_FUNC_FIRST_DST) && order == TSQL_SO_DESC) || + ((functId == TSDB_FUNC_LAST || functId == TSDB_FUNC_LAST_DST) && order == TSQL_SO_ASC)) { + pRuntimeEnv->go[i] = true; + } else if (functId != TSDB_FUNC_TS && functId != TSDB_FUNC_TAG) { + pRuntimeEnv->go[i] = false; + } + } + + pQuery->order.order = pQuery->order.order ^ 1; +} + +void enableFunctForMasterScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + pRuntimeEnv->pCtx[i].order = (pRuntimeEnv->pCtx[i].order) ^ 1; + 
pRuntimeEnv->go[i] = true; + } + + pQuery->order.order = (pQuery->order.order ^ 1); +} + +tFilePage **createInMemGroupResultBuf(SQLFunctionCtx *pCtx, int32_t nOutputCols, int32_t nAlloc) { + tFilePage **pTempBuf = malloc(POINTER_BYTES * nOutputCols); + for (int32_t i = 0; i < nOutputCols; ++i) { + pTempBuf[i] = malloc(sizeof(tFilePage) + pCtx[i].outputBytes * nAlloc); + pTempBuf[i]->numOfElems = 0; + } + return pTempBuf; +} + +void destroyBuf(tFilePage **pBuf, int32_t nOutputCols) { + for (int32_t i = 0; i < nOutputCols; ++i) { + free(pBuf[i]); + } + + free(pBuf); +} + +void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + int32_t rows = pRuntimeEnv->pMeterObj->pointsPerFileBlock; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + pRuntimeEnv->pCtx[i].numOfOutputElems = 0; + if (QUERY_IS_ASC_QUERY(pQuery)) { + pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data; + } else { // point to the last position of output buffer for desc query + pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data + (rows - 1) * pRuntimeEnv->pCtx[i].outputBytes; + } + } + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { + pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; + } + } +} + +void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) { + SQuery *pQuery = pRuntimeEnv->pQuery; + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + // reset the execution contexts + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + int32_t functionId = pQuery->pSelectExpr[j].pBase.functionId; + assert(functionId != TSDB_FUNC_DIFF); + + // set next output position + if (IS_OUTER_FORWARD(aAggs[functionId].nStatus)) { + pRuntimeEnv->pCtx[j].aOutputBuf += pRuntimeEnv->pCtx[j].outputBytes * output * factor; + } + + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { + /* + * NOTE: for top/bottom query, the value of first column of output (timestamp) are assigned + * in the procedure of top/bottom routine + * the output buffer in top/bottom routine is ptsOutputBuf, so we need to forward the output buffer + * + * diff function is handled in multi-output function + */ + pRuntimeEnv->pCtx[j].ptsOutputBuf += TSDB_KEYSIZE * output * factor; + } + } +} + +void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + int32_t functionId = pQuery->pSelectExpr[j].pBase.functionId; + aAggs[functionId].init(&pRuntimeEnv->pCtx[j]); + } +} + +void cleanCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + int32_t rows = pRuntimeEnv->pMeterObj->pointsPerFileBlock; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memset(pQuery->sdata[i]->data, 0, (size_t)pQuery->pSelectExpr[i].resBytes * rows); + } +} + +void doSkipResults(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + if (pQuery->pointsRead == 0 || pQuery->limit.offset == 0) { + return; + } + + if (pQuery->pointsRead <= pQuery->limit.offset) { + pQuery->limit.offset -= pQuery->pointsRead; + + pQuery->pointsRead = 0; + pQuery->pointsOffset = pQuery->pointsToRead; // clear all data in result buffer + + resetCtxOutputBuf(pRuntimeEnv); + } else { + int32_t numOfSkip = (int32_t)pQuery->limit.offset; + int32_t size = 
pQuery->pointsRead; + + pQuery->pointsRead -= numOfSkip; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + + int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + bytes * numOfSkip, pQuery->pointsRead * bytes); + } else { // DESC query + int32_t maxrows = pQuery->pointsToRead; + + memmove(pQuery->sdata[i]->data + (maxrows - pQuery->pointsRead) * bytes, + pQuery->sdata[i]->data + (maxrows - size) * bytes, pQuery->pointsRead * bytes); + } + + pRuntimeEnv->pCtx[i].aOutputBuf -= bytes * numOfSkip * step; + + if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { + pRuntimeEnv->pCtx[i].ptsOutputBuf -= TSDB_KEYSIZE * numOfSkip * step; + } + } + + pQuery->limit.offset = 0; + } +} + +/** + * move remain data to the start position of output buffer + * @param pRuntimeEnv + */ +void moveDescOrderResultsToFront(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + int32_t maxrows = pQuery->pointsToRead; + + if (pQuery->pointsRead > 0 && pQuery->pointsRead < maxrows && !QUERY_IS_ASC_QUERY(pQuery)) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; + memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + (maxrows - pQuery->pointsRead) * bytes, + pQuery->pointsRead * bytes); + } + } +} + +typedef struct SQueryStatus { + SPositionInfo start; + SPositionInfo next; + SPositionInfo end; + + TSKEY skey; + TSKEY ekey; + int8_t overStatus; + TSKEY lastKey; +} SQueryStatus; + +static void queryStatusSave(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + pStatus->overStatus = pQuery->over; + pStatus->lastKey = pQuery->lastKey; + + pStatus->start = pRuntimeEnv->startPos; + pStatus->next = pRuntimeEnv->nextPos; + pStatus->end = pRuntimeEnv->endPos; + + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + + SWAP(pQuery->skey, pQuery->ekey); + pQuery->lastKey = pQuery->skey; + pRuntimeEnv->startPos = pRuntimeEnv->endPos; +} + +static void queryStatusRestore(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus *pStatus) { + SQuery *pQuery = pRuntimeEnv->pQuery; + SWAP(pQuery->skey, pQuery->ekey); + + pQuery->lastKey = pStatus->lastKey; + + pQuery->over = pStatus->overStatus; + + pRuntimeEnv->startPos = pStatus->start; + pRuntimeEnv->nextPos = pStatus->next; + pRuntimeEnv->endPos = pStatus->end; +} + +static void doSupplementaryScan(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQueryStatus qStatus = {0}; + + if (!needSupplementaryScan(pQuery)) { + return; + } + + SET_SUPPLEMENT_SCAN_FLAG(pRuntimeEnv); + + // usually this load operation will incure load disk block operation + TSKEY endKey = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->endPos); + assert((QUERY_IS_ASC_QUERY(pQuery) && endKey <= pQuery->ekey) || + (!QUERY_IS_ASC_QUERY(pQuery) && endKey >= pQuery->ekey)); + + /* close necessary function execution during supplementary scan */ + disableFunctForSuppleScanAndSetSortOrder(pRuntimeEnv, pQuery->order.order); + queryStatusSave(pRuntimeEnv, &qStatus); + + doScanAllDataBlocks(pRuntimeEnv); + + // set the correct start position, and load the corresponding block in buffer if required. 
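+  // (the supplementary scan above has moved the read position away from the saved master-scan start)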
+ TSKEY actKey = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->startPos); + assert((QUERY_IS_ASC_QUERY(pQuery) && actKey >= pQuery->skey) || + (!QUERY_IS_ASC_QUERY(pQuery) && actKey <= pQuery->skey)); + + queryStatusRestore(pRuntimeEnv, &qStatus); + enableFunctForMasterScan(pRuntimeEnv, pQuery->order.order); + SET_MASTER_SCAN_FLAG(pRuntimeEnv); +} + +void setQueryStatus(SQuery *pQuery, int8_t status) { + if (status == QUERY_NOT_COMPLETED) { + pQuery->over = status; + } else { + // QUERY_NOT_COMPLETED is not compatible with any other status, so clear its position first + pQuery->over &= (~QUERY_NOT_COMPLETED); + pQuery->over |= status; + } +} + +void vnodeScanAllData(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + + /* store the start query position */ + savePointPosition(&pRuntimeEnv->startPos, pQuery->fileId, pQuery->slot, pQuery->pos); + + while (1) { + doScanAllDataBlocks(pRuntimeEnv); + + /* applied to agg functions (e.g., stddev) */ + bool more = false; + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + pRuntimeEnv->go[j] = aAggs[pQuery->pSelectExpr[j].pBase.functionId].xNextStep(&pRuntimeEnv->pCtx[j]); + more |= pRuntimeEnv->go[j]; + } + + if (!more) { + break; + } + + // set the correct start position, and load the corresponding block in + // buffer if required. + TSKEY actKey = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->startPos); + assert((QUERY_IS_ASC_QUERY(pQuery) && actKey >= pQuery->skey) || + (!QUERY_IS_ASC_QUERY(pQuery) && actKey <= pQuery->skey)); + + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + pQuery->lastKey = pQuery->skey; + + /* check if query is killed or not */ + if (isQueryKilled(pQuery)) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return; + } + } + + doSupplementaryScan(pRuntimeEnv); + + /* reset status code */ + memset(pRuntimeEnv->go, true, (size_t)pQuery->numOfOutputCols); +} + +void doFinalizeResult(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + aAggs[pQuery->pSelectExpr[j].pBase.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); + } +} + +int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + int64_t maxOutput = 0; + + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + int32_t functionId = pQuery->pSelectExpr[j].pBase.functionId; + + /* + * ts, tag, tagprj function can not decide the output number of current query + * the number of output result is decided by main output + */ + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) { + continue; + } + + if (maxOutput < pRuntimeEnv->pCtx[j].numOfOutputElems) { + maxOutput = pRuntimeEnv->pCtx[j].numOfOutputElems; + } + } + return maxOutput; +} + +/* + * forward the query range for next interval query + */ +void forwardIntervalQueryRange(SMeterQuerySupportObj *pSupporter, SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + pQuery->ekey += (pQuery->nAggTimeInterval * factor); + pQuery->skey = pQuery->ekey - (pQuery->nAggTimeInterval - 1) * factor; + + // boundary check + if (QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->skey > pSupporter->rawEKey) { + setQueryStatus(pQuery, QUERY_COMPLETED); + return; + } + + if (pQuery->ekey > pSupporter->rawEKey) { + pQuery->ekey = pSupporter->rawEKey; + } + } else { + if (pQuery->skey < pSupporter->rawEKey) 
{ + setQueryStatus(pQuery, QUERY_COMPLETED); + return; + } + + if (pQuery->ekey < pSupporter->rawEKey) { + pQuery->ekey = pSupporter->rawEKey; + } + } + + /* ensure the search in cache will return right position */ + pQuery->lastKey = pQuery->skey; + + TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); + if ((nextTimestamp > pSupporter->rawEKey && QUERY_IS_ASC_QUERY(pQuery)) || + (nextTimestamp < pSupporter->rawEKey && !QUERY_IS_ASC_QUERY(pQuery)) || + Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) { + setQueryStatus(pQuery, QUERY_COMPLETED); + return; + } + + // bridge the gap in group by time function + if ((nextTimestamp > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (nextTimestamp < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + getAlignedIntervalQueryRange(pQuery, nextTimestamp, pSupporter->rawSKey, pSupporter->rawEKey); + } +} + +static int32_t offsetComparator(const void *pLeft, const void *pRight) { + SMeterDataInfo **pLeft1 = (SMeterDataInfo **)pLeft; + SMeterDataInfo **pRight1 = (SMeterDataInfo **)pRight; + + if ((*pLeft1)->offsetInHeaderFile == (*pRight1)->offsetInHeaderFile) { + return 0; + } + + return ((*pLeft1)->offsetInHeaderFile > (*pRight1)->offsetInHeaderFile) ? 1 : -1; +} + +/** + * + * @param pQInfo + * @param fid + * @param pQueryFileInfo + * @param start + * @param end + * @param pMeterHeadDataInfo + * @return + */ +SMeterDataInfo **vnodeFilterQualifiedMeters(SQInfo *pQInfo, int32_t vid, SQueryFileInfo *pQueryFileInfo, + tSidSet *pSidSet, SMeterDataInfo *pMeterDataInfo, int32_t *numOfMeters) { + SQuery * pQuery = &pQInfo->query; + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + SMeterSidExtInfo ** pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; + + SVnodeObj *pVnode = &vnodeList[vid]; + + char * pHeaderData = pQueryFileInfo->pHeaderFileData; + int32_t tmsize = sizeof(SCompHeader) * (pVnode->cfg.maxSessions) + sizeof(TSCKSUM); + + if (validateHeaderOffsetSegment(pQInfo, pQueryFileInfo->headerFilePath, vid, pHeaderData, tmsize) < 0) { + /* file is corrupted, abort query in current file */ + *numOfMeters = 0; + return 0; + } + + int64_t oldestKey = getOldestKey(pVnode->numOfFiles, pVnode->fileId, &pVnode->cfg); + SMeterDataInfo **pReqMeterDataInfo = malloc(POINTER_BYTES * pSidSet->numOfSids); + + int32_t groupId = 0; + TSKEY skey, ekey; + + for (int32_t i = 0; i < pSidSet->numOfSids; ++i) { // load all meter meta info + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[i]->sid); + if (pMeterObj == NULL) { + dError("QInfo:%p failed to find required sid:%d", pQInfo, pMeterSidExtInfo[i]->sid); + continue; + } + + if (i >= pSidSet->starterPos[groupId + 1]) { + groupId += 1; + } + + SMeterDataInfo *pOneMeterDataInfo = &pMeterDataInfo[i]; + if (pOneMeterDataInfo->pMeterObj == NULL) { + setMeterDataInfo(pOneMeterDataInfo, pMeterObj, i, groupId); + } + + /* restore possible exists new query range for this meter, which starts from cache */ + if (pOneMeterDataInfo->pMeterQInfo != NULL) { + skey = pOneMeterDataInfo->pMeterQInfo->lastKey; + } else { + skey = pSupporter->rawSKey; + } + + // query on disk data files, which actually starts from the lastkey + ekey = pSupporter->rawEKey; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + assert(skey >= pSupporter->rawSKey); + if (ekey < oldestKey || skey > pMeterObj->lastKeyOnFile) { + continue; + } + } else { + assert(skey <= pSupporter->rawSKey); + if (skey < oldestKey || ekey > pMeterObj->lastKeyOnFile) { + continue; + } + } + + int64_t headerOffset = 
TSDB_FILE_HEADER_LEN + sizeof(SCompHeader) * pMeterObj->sid; + + SCompHeader *compHeader = (SCompHeader *)(pHeaderData + headerOffset); + comp_block_info_read_bytes += sizeof(SCompHeader); + + if (compHeader->compInfoOffset == 0) { + continue; + } + + if (compHeader->compInfoOffset < sizeof(SCompHeader) * pVnode->cfg.maxSessions + TSDB_FILE_HEADER_LEN || + compHeader->compInfoOffset > pQueryFileInfo->headFileSize) { + dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid", pQuery, pMeterObj->vnode, pMeterObj->sid, + pMeterObj->meterId, compHeader->compInfoOffset); + continue; + } + + pOneMeterDataInfo->offsetInHeaderFile = (uint64_t)compHeader->compInfoOffset; + setMeterQueryInfo(pSupporter, pOneMeterDataInfo); + + pReqMeterDataInfo[*numOfMeters] = pOneMeterDataInfo; + (*numOfMeters) += 1; + } + + assert(*numOfMeters <= pSidSet->numOfSids); + + /* enable access sequentially */ + if (*numOfMeters > 1) { + qsort(pReqMeterDataInfo, *numOfMeters, POINTER_BYTES, offsetComparator); + } + + return pReqMeterDataInfo; +} + +void setMeterQueryInfo(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pMeterDataInfo) { + if (pMeterDataInfo->pMeterQInfo != NULL) { + return; + } + + pMeterDataInfo->pMeterQInfo = calloc(1, sizeof(SMeterQueryInfo)); + SMeterQueryInfo *pMQInfo = pMeterDataInfo->pMeterQInfo; + + pMQInfo->skey = pSupporter->rawSKey; + pMQInfo->ekey = pSupporter->rawEKey; + pMQInfo->lastKey = pSupporter->rawSKey; + + pMQInfo->numOfPages = 0; // one page + pMQInfo->numOfAlloc = INIT_ALLOCATE_DISK_PAGES; + pMQInfo->pageList = calloc(pMQInfo->numOfAlloc, sizeof(uint32_t)); + pMQInfo->lastResRows = 0; +} + +void incOutputPageId(SMeterQueryInfo *pMeterQInfo, uint32_t pageId) { + if (pMeterQInfo->numOfPages >= pMeterQInfo->numOfAlloc) { + pMeterQInfo->numOfAlloc = pMeterQInfo->numOfAlloc << 1; + + pMeterQInfo->pageList = realloc(pMeterQInfo->pageList, sizeof(uint32_t) * pMeterQInfo->numOfAlloc); + } + + pMeterQInfo->pageList[pMeterQInfo->numOfPages++] = pageId; +} + +void destroyMeterQueryInfo(SMeterQueryInfo *pMeterQInfo) { + if (pMeterQInfo == NULL) { + return; + } + + free(pMeterQInfo->pageList); + free(pMeterQInfo); +} + +static void clearMeterDataBlockInfo(SMeterDataInfo *pMeterDataInfo) { + tfree(pMeterDataInfo->pBlock); + pMeterDataInfo->numOfBlocks = 0; + pMeterDataInfo->start = 0; +} + +static bool getValidDataBlocksRangeIndex(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery, SCompBlock *pCompBlock, + int64_t numOfBlocks, TSKEY minval, TSKEY maxval, int32_t *end) { + SMeterObj *pMeterObj = pMeterDataInfo->pMeterObj; + SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + + /* + * search the possible blk that may satisfy the query condition always start from the min value, therefore, + * the order is always ascending order + */ + pMeterDataInfo->start = binarySearchForBlockImpl(pCompBlock, (int32_t)numOfBlocks, minval, TSQL_SO_ASC); + if (minval > pCompBlock[pMeterDataInfo->start].keyLast || maxval < pCompBlock[pMeterDataInfo->start].keyFirst) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files", pQInfo, pMeterObj->vnode, pMeterObj->sid, + pMeterObj->meterId); + return false; + } + + // incremental checks following blocks until whose time range does not overlap with the query range + *end = pMeterDataInfo->start; + while (*end <= (numOfBlocks - 1)) { + if (pCompBlock[*end].keyFirst <= maxval && pCompBlock[*end].keyLast >= maxval) { + break; + } + + if (pCompBlock[*end].keyFirst > maxval) { + *end -= 1; + break; + } + + if (*end == numOfBlocks - 1) { + break; + } else { + 
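+      // current block ends before maxval and is not the last one: check the next block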
++(*end); + } + } + + return true; +} + +static bool setValidDataBlocks(SMeterDataInfo *pMeterDataInfo, SCompBlock *pCompBlock, int32_t end) { + int32_t size = (end - pMeterDataInfo->start) + 1; + assert(size > 0); + + if (size != pMeterDataInfo->numOfBlocks) { + char *tmp = realloc(pMeterDataInfo->pBlock, POINTER_BYTES * size); + if (tmp == NULL) { + return false; + } + + pMeterDataInfo->pBlock = (SCompBlock **)tmp; + pMeterDataInfo->numOfBlocks = size; + } + + for (int32_t i = pMeterDataInfo->start, j = 0; i <= end; ++i, ++j) { + pMeterDataInfo->pBlock[j] = &pCompBlock[i]; + } + + return true; +} + +static bool setCurrentQueryRange(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery, TSKEY endKey, TSKEY *minval, + TSKEY *maxval) { + SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + SMeterObj * pMeterObj = pMeterDataInfo->pMeterObj; + SMeterQueryInfo *pMeterQInfo = pMeterDataInfo->pMeterQInfo; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + *minval = pMeterQInfo->lastKey; + *maxval = endKey; + } else { + *minval = endKey; + *maxval = pMeterQInfo->lastKey; + } + + if (*minval > *maxval) { + qTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files, qrange:%lld-%lld, lastKey:%lld", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, + pMeterQInfo->lastKey); + return false; + } else { + qTrace("QInfo:%p vid:%d sid:%d id:%s, query in files, qrange:%lld-%lld, lastKey:%lld", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, + pMeterQInfo->lastKey); + return true; + } +} + +/** + * + * @param pQuery + * @param pHeaderData + * @param numOfMeters + * @param pMeterDataInfo + * @return + */ +uint32_t getDataBlocksForMeters(SMeterQuerySupportObj *pSupporter, SQuery *pQuery, char *pHeaderData, + int32_t numOfMeters, SQueryFileInfo *pQueryFileInfo, SMeterDataInfo **pMeterDataInfo) { + uint32_t numOfBlocks = 0; + SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + SQueryCostStatistics *pSummary = &pSupporter->runtimeEnv.summary; + + TSKEY minval, maxval; + + // sequentially scan this header file to extract the compHeader info + for (int32_t j = 0; j < numOfMeters; ++j) { + SMeterObj *pMeterObj = pMeterDataInfo[j]->pMeterObj; + + SCompInfo *compInfo = (SCompInfo *)(pHeaderData + pMeterDataInfo[j]->offsetInHeaderFile); + int32_t ret = validateCompBlockInfoSegment(pQInfo, pQueryFileInfo->headerFilePath, pMeterObj->vnode, compInfo, + pMeterDataInfo[j]->offsetInHeaderFile); + if (ret != 0) { + clearMeterDataBlockInfo(pMeterDataInfo[j]); + continue; + } + + if (compInfo->numOfBlocks <= 0 || compInfo->uid != pMeterDataInfo[j]->pMeterObj->uid) { + clearMeterDataBlockInfo(pMeterDataInfo[j]); + continue; + } + + int32_t size = compInfo->numOfBlocks * sizeof(SCompBlock); + SCompBlock *pCompBlock = (SCompBlock *)((char *)compInfo + sizeof(SCompInfo)); + + int64_t st = taosGetTimestampUs(); + + // check compblock integrity + TSCKSUM checksum = *(TSCKSUM *)((char *)compInfo + sizeof(SCompInfo) + size); + ret = validateCompBlockSegment(pQInfo, pQueryFileInfo->headerFilePath, compInfo, (char *)pCompBlock, + pMeterObj->vnode, checksum); + if (ret < 0) { + clearMeterDataBlockInfo(pMeterDataInfo[j]); + continue; + } + + int64_t et = taosGetTimestampUs(); + + pSummary->readCompInfo++; + pSummary->totalCompInfoSize += (size + sizeof(SCompInfo) + sizeof(TSCKSUM)); + pSummary->loadCompInfoUs += (et - st); + + if (!setCurrentQueryRange(pMeterDataInfo[j], pQuery, pSupporter->rawEKey, &minval, &maxval)) { + 
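+      // no time range left to query for this meter (minval > maxval), so none of its blocks qualify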
clearMeterDataBlockInfo(pMeterDataInfo[j]); + continue; + } + + int32_t end = 0; + if (!getValidDataBlocksRangeIndex(pMeterDataInfo[j], pQuery, pCompBlock, compInfo->numOfBlocks, minval, maxval, + &end)) { + clearMeterDataBlockInfo(pMeterDataInfo[j]); + continue; + } + + if (!setValidDataBlocks(pMeterDataInfo[j], pCompBlock, end)) { + clearMeterDataBlockInfo(pMeterDataInfo[j]); + pQInfo->killed = 1; // todo set query kill, abort current query since no + // memory available + return 0; + } + + qTrace("QInfo:%p vid:%d sid:%d id:%s, startIndex:%d, %d blocks qualified", pQInfo, pMeterObj->vnode, pMeterObj->sid, + pMeterObj->meterId, pMeterDataInfo[j]->start, pMeterDataInfo[j]->numOfBlocks); + + numOfBlocks += pMeterDataInfo[j]->numOfBlocks; + } + + return numOfBlocks; +} + +static void freeDataBlockFieldInfo(SMeterDataBlockInfoEx *pDataBlockInfoEx, int32_t len) { + for (int32_t i = 0; i < len; ++i) { + tfree(pDataBlockInfoEx[i].pBlock.fields); + } +} + +void freeMeterBlockInfoEx(SMeterDataBlockInfoEx *pDataBlockInfoEx, int32_t len) { + freeDataBlockFieldInfo(pDataBlockInfoEx, len); + tfree(pDataBlockInfoEx); +} + +typedef struct SBlockOrderSupporter { + int32_t numOfMeters; + SMeterDataBlockInfoEx **pDataBlockInfoEx; + int32_t * blockIndexArray; + int32_t * numOfBlocksPerMeter; +} SBlockOrderSupporter; + +static int32_t blockAccessOrderComparator(const void *pLeft, const void *pRight, void *param) { + int32_t leftTableIndex = *(int32_t *)pLeft; + int32_t rightTableIndex = *(int32_t *)pRight; + + SBlockOrderSupporter *pSupporter = (SBlockOrderSupporter *)param; + + int32_t leftTableBlockIndex = pSupporter->blockIndexArray[leftTableIndex]; + int32_t rightTableBlockIndex = pSupporter->blockIndexArray[rightTableIndex]; + + if (leftTableBlockIndex > pSupporter->numOfBlocksPerMeter[leftTableIndex]) { + /* left block is empty */ + return 1; + } else if (rightTableBlockIndex > pSupporter->numOfBlocksPerMeter[rightTableIndex]) { + /* right block is empty */ + return -1; + } + + SMeterDataBlockInfoEx *pLeftBlockInfoEx = &pSupporter->pDataBlockInfoEx[leftTableIndex][leftTableBlockIndex]; + SMeterDataBlockInfoEx *pRightBlockInfoEx = &pSupporter->pDataBlockInfoEx[rightTableIndex][rightTableBlockIndex]; + + // assert(pLeftBlockInfoEx->pBlock.compBlock->offset != pRightBlockInfoEx->pBlock.compBlock->offset); + if (pLeftBlockInfoEx->pBlock.compBlock->offset == pRightBlockInfoEx->pBlock.compBlock->offset && + pLeftBlockInfoEx->pBlock.compBlock->last == pRightBlockInfoEx->pBlock.compBlock->last) { + // todo add more information + dError("error in header file, two block with same offset:%p", pLeftBlockInfoEx->pBlock.compBlock->offset); + } + + return pLeftBlockInfoEx->pBlock.compBlock->offset > pRightBlockInfoEx->pBlock.compBlock->offset ? 
1 : -1; +} + +int32_t createDataBlocksInfoEx(SMeterDataInfo **pMeterDataInfo, int32_t numOfMeters, + SMeterDataBlockInfoEx **pDataBlockInfoEx, int32_t numOfCompBlocks, + int32_t *nAllocBlocksInfoSize, int64_t addr) { + /* release allocated memory first */ + freeDataBlockFieldInfo(*pDataBlockInfoEx, *nAllocBlocksInfoSize); + + if (*nAllocBlocksInfoSize == 0 || *nAllocBlocksInfoSize < numOfCompBlocks) { + *pDataBlockInfoEx = + (SMeterDataBlockInfoEx *)realloc((*pDataBlockInfoEx), sizeof(SMeterDataBlockInfoEx) * numOfCompBlocks); + memset((*pDataBlockInfoEx), 0, sizeof(SMeterDataBlockInfoEx) * numOfCompBlocks); + *nAllocBlocksInfoSize = numOfCompBlocks; + } + + SBlockOrderSupporter supporter = {0}; + supporter.numOfMeters = numOfMeters; + supporter.numOfBlocksPerMeter = calloc(1, sizeof(int32_t) * numOfMeters); + supporter.blockIndexArray = calloc(1, sizeof(int32_t) * numOfMeters); + supporter.pDataBlockInfoEx = calloc(1, POINTER_BYTES * numOfMeters); + + int32_t cnt = 0; + int32_t numOfQualMeters = 0; + for (int32_t j = 0; j < numOfMeters; ++j) { + if (pMeterDataInfo[j]->numOfBlocks == 0) { + continue; + } + + SCompBlock **pBlock = pMeterDataInfo[j]->pBlock; + supporter.numOfBlocksPerMeter[numOfQualMeters] = pMeterDataInfo[j]->numOfBlocks; + supporter.pDataBlockInfoEx[numOfQualMeters] = + calloc(1, sizeof(SMeterDataBlockInfoEx) * pMeterDataInfo[j]->numOfBlocks); + + for (int32_t k = 0; k < pMeterDataInfo[j]->numOfBlocks; ++k) { + SMeterDataBlockInfoEx *pInfoEx = &supporter.pDataBlockInfoEx[numOfQualMeters][k]; + + pInfoEx->pBlock.compBlock = pBlock[k]; + pInfoEx->pBlock.fields = NULL; + + pInfoEx->pMeterDataInfo = pMeterDataInfo[j]; + pInfoEx->groupIdx = pMeterDataInfo[j]->groupIdx; // set the group index + pInfoEx->blockIndex = pMeterDataInfo[j]->start + k; // set the block index in original meter + cnt++; + } + + numOfQualMeters++; + } + + dTrace("QInfo %p create data blocks info struct completed", addr); + + assert(cnt <= numOfCompBlocks && numOfQualMeters <= numOfMeters); + supporter.numOfMeters = numOfQualMeters; + SLoserTreeInfo *pTree = NULL; + + uint8_t ret = tLoserTreeCreate(&pTree, supporter.numOfMeters, &supporter, blockAccessOrderComparator); + UNUSED(ret); + + int32_t numOfTotal = 0; + + while (numOfTotal < cnt) { + int32_t pos = pTree->pNode[0].index; + SMeterDataBlockInfoEx *pBlocksInfoEx = supporter.pDataBlockInfoEx[pos]; + int32_t index = supporter.blockIndexArray[pos]++; + + (*pDataBlockInfoEx)[numOfTotal++] = pBlocksInfoEx[index]; + + if (supporter.blockIndexArray[pos] >= supporter.numOfBlocksPerMeter[pos]) { + /* set data block index overflow, in order to disable the offset comparator */ + supporter.blockIndexArray[pos] = supporter.numOfBlocksPerMeter[pos] + 1; + } + tLoserTreeAdjust(pTree, pos + supporter.numOfMeters); + } + + /* + * available when no import exists + * for(int32_t i = 0; i < cnt - 1; ++i) { + * assert((*pDataBlockInfoEx)[i].pBlock.compBlock->offset < (*pDataBlockInfoEx)[i+1].pBlock.compBlock->offset); + * } + */ + + dTrace("QInfo %p %d data blocks sort completed", addr, cnt); + + tfree(supporter.numOfBlocksPerMeter); + tfree(supporter.blockIndexArray); + + for (int32_t i = 0; i < numOfMeters; ++i) { + tfree(supporter.pDataBlockInfoEx[i]); + } + + tfree(supporter.pDataBlockInfoEx); + free(pTree); + + return cnt; +} + +/** + * set output buffer for different group + * @param pRuntimeEnv + * @param pDataBlockInfoEx + */ +void setExecutionContext(SMeterQuerySupportObj *pSupporter, SOutputRes *outputRes, int32_t meterIdx, int32_t groupIdx) { + 
SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + setOutputBuffer(pRuntimeEnv, &outputRes[groupIdx]); + + if (outputRes[groupIdx].numOfRows == 0) { + initCtxOutputBuf(pRuntimeEnv); + } + + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + pRuntimeEnv->pCtx[j].numOfOutputElems = 0; + pRuntimeEnv->pCtx[j].numOfIteratedElems = 0; + } + + vnodeSetTagValueInParam(pSupporter->pSidSet, pRuntimeEnv, pSupporter->pMeterSidExtInfo[meterIdx]); +} + +static void setOutputBuffer(SQueryRuntimeEnv *pRuntimeEnv, SOutputRes *pResult) { + // Note: pResult->result[i]->numOfElems == 0, there is only fixed number of + // results for each group + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { + // the value is not set yet + assert(pResult->result[i]->numOfElems == 0); + pRuntimeEnv->pCtx[i].aOutputBuf = + pResult->result[i]->data + pRuntimeEnv->pCtx[i].outputBytes * pResult->result[i]->numOfElems; + } +} + +static tFilePage *allocNewPage(SMeterQuerySupportObj *pSupporter, uint32_t *pageId) { + if (pSupporter->lastPageId == pSupporter->numOfPages - 1) { + extendDiskBuf(pSupporter, pSupporter->numOfPages + pSupporter->numOfMeters); + } + + *pageId = (++pSupporter->lastPageId); + + return getFilePage(pSupporter, *pageId); +} + +static char *getOutputResPos(SQueryRuntimeEnv *pRuntimeEnv, tFilePage *pData, int32_t row, int32_t col) { + return (char *)pData->data + pRuntimeEnv->offset[col] * pRuntimeEnv->numOfRowsPerPage + + pRuntimeEnv->pCtx[col].outputBytes * row; +} + +void setCtxOutputPointerForSupplementScan(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *sqinfo) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + int32_t index = sqinfo->reverseIndex; + tFilePage *pData = NULL; + int32_t i = 0; + + // find the position for this output result + for (; i < sqinfo->numOfPages; ++i) { + pData = getFilePage(pSupporter, sqinfo->pageList[i]); + if (index <= pData->numOfElems) { + break; + } + index -= pData->numOfElems; + } + + assert(index >= 0); + + /* + * if it is the first records in master scan, no next results exist, so no need to init the result buffer + * all data are processed and save to buffer during supplementary scan. 
+ */ + if (index == 0) { + return; + } + + for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) { + pRuntimeEnv->pCtx[k].aOutputBuf = getOutputResPos(pRuntimeEnv, pData, index - 1, k); + if (!pRuntimeEnv->go[k]) { + continue; + } + + int32_t functId = pQuery->pSelectExpr[k].pBase.functionId; + + /* setup runtime environment */ + if ((QUERY_IS_ASC_QUERY(pQuery) && functId == TSDB_FUNC_FIRST_DST) || + (!QUERY_IS_ASC_QUERY(pQuery) && functId == TSDB_FUNC_LAST_DST)) { + if (sqinfo->lastResRows == 0) { + aAggs[functId].init(&pRuntimeEnv->pCtx[k]); + } + } + } + + TSKEY ts = *(TSKEY *)pRuntimeEnv->pCtx[0].aOutputBuf; + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + qTrace("QInfo:%p vid:%d sid:%d id:%s, set output result pointer, ts:%lld, index:%d", + GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, ts, sqinfo->reverseIndex); +} + +void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; + + if (pRuntimeEnv->scanFlag == SUPPLEMENTARY_SCAN && numOfIncrementRes > 0) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (functionId == TSDB_FUNC_TS) { + assert(*(TSKEY *)pCtx[i].aOutputBuf == pCtx[i].nStartQueryTimestamp); + } + } + } +} + +void setOutputBufferForIntervalQuery(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *sqinfo) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + tFilePage * pData = NULL; + uint32_t newPageId = 0; + + // in the first scan, new space needed for results + if (sqinfo->numOfPages == 0) { + pData = allocNewPage(pSupporter, &newPageId); + incOutputPageId(sqinfo, newPageId); + } else { + int32_t lastPageId = sqinfo->pageList[sqinfo->numOfPages - 1]; + pData = getFilePage(pSupporter, lastPageId); + + if (pData->numOfElems >= pRuntimeEnv->numOfRowsPerPage) { + pData = allocNewPage(pSupporter, &newPageId); + incOutputPageId(sqinfo, newPageId); + + // number of elements must be 0 for newly allocated buffer + assert(pData->numOfElems == 0); + } + } + + for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) { + pRuntimeEnv->pCtx[i].aOutputBuf = getOutputResPos(pRuntimeEnv, pData, pData->numOfElems, i); + } +} + +void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t meterIdx, SMeterQueryInfo *sqinfo) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + if (IS_MASTER_SCAN(pRuntimeEnv)) { + setOutputBufferForIntervalQuery(pSupporter, sqinfo); + + if (sqinfo->lastResRows == 0) { + initCtxOutputBuf(pRuntimeEnv); + } + + /* reset the number of iterated elements, once this function is called. 
since the pCtx for different + */ + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + pRuntimeEnv->pCtx[j].numOfOutputElems = 0; + pRuntimeEnv->pCtx[j].numOfIteratedElems = 0; + } + + } else { + if (sqinfo->reverseFillRes) { + setCtxOutputPointerForSupplementScan(pSupporter, sqinfo); + } else { // find the correct output position of existed results during + // reverse scan data blocks + /* + * the master scan does not produce any results yet, + * new spaces needed to be allocated during supplementary scan + */ + setOutputBufferForIntervalQuery(pSupporter, sqinfo); + } + } + + vnodeSetTagValueInParam(pSupporter->pSidSet, pRuntimeEnv, pSupporter->pMeterSidExtInfo[meterIdx]); +} + +static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pInfo, + SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields, + __block_search_fn_t searchFn) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + int64_t nextKey = -1; + bool queryCompleted = false; + + while (1) { + int32_t numOfRes = 0; + int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryCol, sdata, pFields, searchFn, &numOfRes); + assert(steps > 0); + + // NOTE: in case of stable query, only ONE(or ZERO) row of result generated for each query range + if (pInfo->lastResRows == 0) { + pInfo->lastResRows = numOfRes; + } else { + assert(pInfo->lastResRows == 1); + } + + int32_t pos = pQuery->pos + steps * factor; + + // query does not reach the end of current block + if ((pos < pBlockInfo->size && QUERY_IS_ASC_QUERY(pQuery)) || (pos >= 0 && !QUERY_IS_ASC_QUERY(pQuery))) { + nextKey = pPrimaryCol[pos]; + } else { + assert((pQuery->lastKey > pBlockInfo->keyLast && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->lastKey < pBlockInfo->keyFirst && !QUERY_IS_ASC_QUERY(pQuery))); + } + + // all data satisfy current query are checked, query completed + if (QUERY_IS_ASC_QUERY(pQuery)) { + queryCompleted = (nextKey > pQuery->ekey || pQuery->ekey <= pBlockInfo->keyLast); + } else { + queryCompleted = (nextKey < pQuery->ekey || pQuery->ekey >= pBlockInfo->keyFirst); + } + + /* + * 1. there may be more date that satisfy current query interval, other than current block, we need to + * try next data blocks + * 2. query completed, since reaches the upper bound of the main query range + */ + if (QUERY_IS_ASC_QUERY(pQuery)) { + if (pQuery->lastKey > pBlockInfo->keyLast || pQuery->lastKey > pSupporter->rawEKey || + nextKey > pSupporter->rawEKey) { + /* + * current interval query is completed, set query result flag closed and try next data block + * if pQuery->ekey == pSupporter->rawEKey, whole query is completed + */ + if (pQuery->lastKey > pBlockInfo->keyLast) { + assert(pQuery->ekey >= pBlockInfo->keyLast); + } + + if (pQuery->lastKey > pSupporter->rawEKey || nextKey > pSupporter->rawEKey) { + /* whole query completed, save result and abort */ + assert(queryCompleted); + saveResult(pSupporter, pInfo, pInfo->lastResRows); + + /* + * save the pQuery->lastKey for retrieve data in cache, actually, there will be no qualified data in cache. 
+ */ + saveIntervalQueryRange(pQuery, pInfo); + } else if (pQuery->ekey == pBlockInfo->keyLast) { + /* current interval query is completed, set the next query range on other data blocks if exist */ + int64_t prevEKey = pQuery->ekey; + + getAlignedIntervalQueryRange(pQuery, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey); + saveIntervalQueryRange(pQuery, pInfo); + + assert(queryCompleted && prevEKey < pQuery->skey); + if (pInfo->lastResRows > 0) { + saveResult(pSupporter, pInfo, pInfo->lastResRows); + } + } else { + /* + * Data that satisfy current query range may locate in current block and blocks that are + * directly right next to current block. Therefore, we need to keep the query range(interval) + * unchanged until reaching the direct next data block, while only forwards the pQuery->lastKey. + * + * With the information of the directly next data block, whether locates in cache or disk, + * current interval query being completed or not can be decided. + */ + saveIntervalQueryRange(pQuery, pInfo); + assert(pQuery->lastKey > pBlockInfo->keyLast && pQuery->lastKey <= pQuery->ekey); + + /* + * if current block is the last block of current file, + * we still close the result flag, and merge with other meters in the same group + */ + if (queryCompleted) { + saveResult(pSupporter, pInfo, pInfo->lastResRows); + } + } + + break; + } + } else { + if (pQuery->lastKey < pBlockInfo->keyFirst || pQuery->lastKey < pSupporter->rawEKey || + nextKey < pSupporter->rawEKey) { + if (pQuery->lastKey < pBlockInfo->keyFirst) { + assert(pQuery->ekey <= pBlockInfo->keyFirst); + } + + if (pQuery->lastKey < pSupporter->rawEKey || (nextKey < pSupporter->rawEKey && nextKey != -1)) { + /* whole query completed, save result and abort */ + assert(queryCompleted); + saveResult(pSupporter, pInfo, pInfo->lastResRows); + + /* + * save the pQuery->lastKey for retrieve data in cache, actually, there will be no qualified data in cache. + */ + saveIntervalQueryRange(pQuery, pInfo); + } else if (pQuery->ekey == pBlockInfo->keyFirst) { + /* current interval query is completed, set the next query range on other data blocks if exist */ + int64_t prevEKey = pQuery->ekey; + + getAlignedIntervalQueryRange(pQuery, pQuery->lastKey, pSupporter->rawSKey, pSupporter->rawEKey); + saveIntervalQueryRange(pQuery, pInfo); + + assert(queryCompleted && prevEKey > pQuery->skey); + if (pInfo->lastResRows > 0) { + saveResult(pSupporter, pInfo, pInfo->lastResRows); + } + } else { + /* + * Data that satisfy current query range may locate in current block and blocks that are + * directly right next to current block. Therefore, we need to keep the query range(interval) + * unchanged until reaching the direct next data block, while only forwards the pQuery->lastKey. + * + * With the information of the directly next data block, whether locates in cache or disk, + * current interval query being completed or not can be decided. 
+ */ + saveIntervalQueryRange(pQuery, pInfo); + assert(pQuery->lastKey < pBlockInfo->keyFirst && pQuery->lastKey >= pQuery->ekey); + + /* + * if current block is the last block of current file, + * we still close the result flag, and merge with other meters in the same group + */ + if (queryCompleted) { + saveResult(pSupporter, pInfo, pInfo->lastResRows); + } + } + + break; + } + } + + assert(queryCompleted); + saveResult(pSupporter, pInfo, pInfo->lastResRows); + + assert((nextKey >= pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) || + (nextKey <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))); + + /* still in the same block to query */ + getAlignedIntervalQueryRange(pQuery, nextKey, pSupporter->rawSKey, pSupporter->rawEKey); + saveIntervalQueryRange(pQuery, pInfo); + + int32_t newPos = searchFn((char *)pPrimaryCol, pBlockInfo->size, pQuery->skey, pQuery->order.order); + assert(newPos == pQuery->pos + steps * factor); + + pQuery->pos = newPos; + } +} + +int64_t getNextAccessedKeyInData(SQuery *pQuery, int64_t *pPrimaryCol, SBlockInfo *pBlockInfo, int32_t blockStatus) { + assert(pQuery->pos >= 0 && pQuery->pos <= pBlockInfo->size - 1); + + TSKEY key = -1; + if (IS_DATA_BLOCK_LOADED(blockStatus)) { + key = pPrimaryCol[pQuery->pos]; + } else { + assert(pQuery->pos == pBlockInfo->size - 1 || pQuery->pos == 0); + key = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->keyFirst : pBlockInfo->keyLast; + } + + assert((key >= pQuery->skey && QUERY_IS_ASC_QUERY(pQuery)) || (key <= pQuery->skey && !QUERY_IS_ASC_QUERY(pQuery))); + return key; +} + +void setIntervalQueryRange(SMeterQuerySupportObj *pSupporter, TSKEY key, SMeterDataInfo *pInfoEx) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + SMeterQueryInfo *pMeterQueryInfo = pInfoEx->pMeterQInfo; + if (pMeterQueryInfo->queryRangeSet) { + assert((QUERY_IS_ASC_QUERY(pQuery) && pQuery->lastKey >= pQuery->skey) || + (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->lastKey <= pQuery->skey)); + + if ((pQuery->ekey < key && QUERY_IS_ASC_QUERY(pQuery)) || (pQuery->ekey > key && !QUERY_IS_ASC_QUERY(pQuery))) { + /* + * last query on this block of the meter is done, start next interval on this block + * otherwise, keep the previous query range and proceed + */ + getAlignedIntervalQueryRange(pQuery, key, pSupporter->rawSKey, pSupporter->rawEKey); + saveIntervalQueryRange(pQuery, pMeterQueryInfo); + + // previous query does not be closed, close it + if (pMeterQueryInfo->lastResRows > 0) { + saveResult(pSupporter, pMeterQueryInfo, pMeterQueryInfo->lastResRows); + } + } else { + /* current query not completed, continue. do nothing with respect to query range, */ + } + } else { + /* + * There are two cases to handle for the first block. + * 1. Query range is not set yet. we need to set the query range, pQuery->lastKey, pQuery->skey, pQuery->eKey. + * 2. Query range is set and in progress. There may be another result with the same query ranges to be merged + * during merge stage. However, in this case, we need the + * pMeterQueryInfo->lastQueryClosed to decide if there is a previous result be generated or not. + */ + pQuery->skey = key; + assert(pMeterQueryInfo->lastResRows == 0); + + if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey < pQuery->skey)) || + (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey < pQuery->ekey))) { + // for too small query range, no data in this interval. 
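+      // note: queryRangeSet stays 0 here, so the range is evaluated again when the next qualified key arrives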
+ return; + } + + getAlignedIntervalQueryRange(pQuery, pQuery->skey, pSupporter->rawSKey, pSupporter->rawEKey); + saveIntervalQueryRange(pQuery, pMeterQueryInfo); + pMeterQueryInfo->queryRangeSet = 1; + } +} + +bool requireTimestamp(SQuery *pQuery) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; i++) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_NEED_TS) != 0) { + return true; + } + } + return false; +} + +static void setTimestampRange(SQueryRuntimeEnv *pRuntimeEnv, int64_t stime, int64_t etime) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + int32_t functionId = pQuery->pSelectExpr[i].pBase.functionId; + if (functionId == TSDB_FUNC_SPREAD || functionId == TSDB_FUNC_SPREAD_DST) { + pRuntimeEnv->pCtx[i].intermediateBuf[1].dKey = stime; + pRuntimeEnv->pCtx[i].intermediateBuf[2].dKey = etime; + } + } +} + +bool needPrimaryTimestampCol(SQuery *pQuery, SBlockInfo *pBlockInfo) { + /* + * 1. if skey or ekey locates in this block, we need to load the timestamp column to decide the precise position + * 2. if there are top/bottom, first_dst/last_dst functions, we need to load timestamp column in any cases; + */ + bool loadPrimaryTS = (pQuery->lastKey >= pBlockInfo->keyFirst && pQuery->lastKey <= pBlockInfo->keyLast) || + (pQuery->ekey >= pBlockInfo->keyFirst && pQuery->ekey <= pBlockInfo->keyLast) || + requireTimestamp(pQuery); + + return loadPrimaryTS; +} + +int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, int8_t *blkStatus, SQueryRuntimeEnv *pRuntimeEnv, + int32_t fileIdx, int32_t slotIdx, __block_search_fn_t searchFn, bool onDemand) { + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterObj * pMeterObj = pRuntimeEnv->pMeterObj; + SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIdx]; + + TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; + + pQuery->slot = slotIdx; + pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : pBlock->numOfPoints - 1;
+
+  SET_FILE_BLOCK_FLAG(*blkStatus);
+  SET_DATA_BLOCK_NOT_LOADED(*blkStatus);
+
+  if (((pQuery->lastKey <= pBlock->keyFirst && pQuery->ekey >= pBlock->keyLast && QUERY_IS_ASC_QUERY(pQuery)) ||
+       (pQuery->ekey <= pBlock->keyFirst && pQuery->lastKey >= pBlock->keyLast && !QUERY_IS_ASC_QUERY(pQuery))) &&
+      onDemand) {
+    int32_t req = 0;
+    if (pQuery->numOfFilterCols > 0) {
+      req = BLK_DATA_ALL_NEEDED;
+    } else {
+      for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+        int32_t functID = pQuery->pSelectExpr[i].pBase.functionId;
+        req |= aAggs[functID].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlock->keyFirst, pBlock->keyLast,
+                                          pQuery->pSelectExpr[i].pBase.colInfo.colId, *blkStatus);
+      }
+    }
+
+    if (req == BLK_DATA_NO_NEEDED) {
+      qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, data block ignored, brange:%lld-%lld, rows:%d",
+             GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->keyFirst,
+             pBlock->keyLast, pBlock->numOfPoints);
+
+      setTimestampRange(pRuntimeEnv, pBlock->keyFirst, pBlock->keyLast);
+    } else if (req == BLK_DATA_FILEDS_NEEDED) {
+      if (loadDataBlockFieldsInfo(pRuntimeEnv, pQueryFileInfo, pBlock, pFields) < 0) {
+        return DISK_DATA_LOAD_FAILED;
+      }
+    } else {
+      assert(req == BLK_DATA_ALL_NEEDED);
+      goto _load_all;
+    }
+  } else {
+  _load_all:
+    // load sfield first
+    if (loadDataBlockFieldsInfo(pRuntimeEnv, pQueryFileInfo, pBlock, pFields) < 0) {
+      return DISK_DATA_LOAD_FAILED;
+    }
+
+    if ((pQuery->lastKey <= pBlock->keyFirst && pQuery->ekey >= pBlock->keyLast && QUERY_IS_ASC_QUERY(pQuery)) ||
+        (pQuery->lastKey >= pBlock->keyLast && pQuery->ekey <= pBlock->keyFirst && !QUERY_IS_ASC_QUERY(pQuery))) {
+      /*
+       * if this block is completely included in the query range, apply the value filter condition directly:
+       * when the pre-filter shows that no row of this block can pass, there is no need to load the data block,
+       * and we continue to the next block
+       */
+      if (!needToLoadDataBlock(pQuery, *pFields, pRuntimeEnv->pCtx)) {
+#if defined(_DEBUG_VIEW)
+        dTrace("QInfo:%p fileId:%d, slot:%d, block discarded by pre-filter, ", GET_QINFO_ADDR(pQuery), pQuery->fileId,
+               pQuery->slot);
+#endif
+        qTrace("QInfo:%p id:%s slot:%d, data block ignored by pre-filter, fields loaded, brange:%lld-%lld, rows:%d",
+               GET_QINFO_ADDR(pQuery), pMeterObj->meterId, pQuery->slot, pBlock->keyFirst, pBlock->keyLast,
+               pBlock->numOfPoints);
+        return DISK_DATA_DISCARDED;
+      }
+    }
+
+    SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK);
+    bool       loadTS = needPrimaryTimestampCol(pQuery, &binfo);
+
+    /*
+     * the pRuntimeEnv->pMeterObj is not updated during the loop, since it does not matter which meter this block
+     * belongs to. In order to force the data block to be loaded, we HACK the load check procedure
+     * by changing pQuery->slot each time to IGNORE the pLoadInfo data check. It is NOT a normal way. 
+   */
+    int32_t ret = loadDataBlockIntoMem(pBlock, pFields, pRuntimeEnv, fileIdx, loadTS, false);
+    SET_DATA_BLOCK_LOADED(*blkStatus);
+
+    if (ret < 0) {
+      return DISK_DATA_LOAD_FAILED;
+    }
+
+    if (loadTS) {
+      /* find the first qualified record position in this block */
+      pQuery->pos =
+          searchFn(pRuntimeEnv->primaryColBuffer->data, pBlock->numOfPoints, pQuery->lastKey, pQuery->order.order);
+      /* boundary timestamp check */
+      assert(pBlock->keyFirst == primaryKeys[0] && pBlock->keyLast == primaryKeys[pBlock->numOfPoints - 1]);
+    }
+
+    assert((pQuery->skey <= pQuery->lastKey && QUERY_IS_ASC_QUERY(pQuery)) ||
+           (pQuery->ekey <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery)));
+  }
+
+  return DISK_DATA_LOADED;
+}
+
+bool onDemandLoadDatablock(SQuery *pQuery, int16_t queryRangeSet) {
+  return (pQuery->nAggTimeInterval == 0) || ((queryRangeSet == 1) && (pQuery->nAggTimeInterval > 0));
+}
+
+void saveIntervalQueryRange(SQuery *pQuery, SMeterQueryInfo *pInfo) {
+  pInfo->skey = pQuery->skey;
+  pInfo->ekey = pQuery->ekey;
+  pInfo->lastKey = pQuery->lastKey;
+
+  assert(((pQuery->lastKey >= pQuery->skey) && QUERY_IS_ASC_QUERY(pQuery)) ||
+         ((pQuery->lastKey <= pQuery->skey) && !QUERY_IS_ASC_QUERY(pQuery)));
+}
+
+void restoreIntervalQueryRange(SQuery *pQuery, SMeterQueryInfo *pInfo) {
+  pQuery->skey = pInfo->skey;
+  pQuery->ekey = pInfo->ekey;
+  pQuery->lastKey = pInfo->lastKey;
+
+  assert(((pQuery->lastKey >= pQuery->skey) && QUERY_IS_ASC_QUERY(pQuery)) ||
+         ((pQuery->lastKey <= pQuery->skey) && !QUERY_IS_ASC_QUERY(pQuery)));
+}
+
+static void validateResultBuf(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *sqinfo) {
+  SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
+  SQuery *          pQuery = pSupporter->runtimeEnv.pQuery;
+
+  tFilePage *newOutput = getFilePage(pSupporter, sqinfo->pageList[sqinfo->numOfPages - 1]);
+  for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
+    assert(pRuntimeEnv->pCtx[i].aOutputBuf - newOutput->data < DEFAULT_INTERN_BUF_SIZE);
+  }
+}
+
+void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *sqinfo, int32_t numOfResult) {
+  SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
+  SQuery *          pQuery = pRuntimeEnv->pQuery;
+
+  if (numOfResult <= 0) {
+    return;
+  }
+
+  assert(sqinfo->lastResRows == 1);
+  numOfResult = 1;
+  sqinfo->lastResRows = 0;
+
+  if (IS_SUPPLEMENT_SCAN(pRuntimeEnv) && sqinfo->reverseFillRes == 1) {
+    assert(sqinfo->numOfRes > 0 && sqinfo->reverseIndex > 0 && sqinfo->reverseIndex <= sqinfo->numOfRes);
+    // step backward one position; the start position is (sqinfo->numOfRes - 1)
+    sqinfo->reverseIndex -= 1;
+    setCtxOutputPointerForSupplementScan(pSupporter, sqinfo);
+  } else {
+    int32_t    pageId = sqinfo->pageList[sqinfo->numOfPages - 1];
+    tFilePage *pData = getFilePage(pSupporter, pageId);
+
+    // in handling records occurring around '1970-01-01', the aligned start
+    // timestamp may be 0. 
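+    // column 0 of the output page holds the interval start timestamp; it is read back below for the sanity check and the trace log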
+ TSKEY ts = *(TSKEY *)getOutputResPos(pRuntimeEnv, pData, pData->numOfElems, 0); + assert(ts >= 0); + + SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; + qTrace("QInfo:%p vid:%d sid:%d id:%s, save results, ts:%lld, total:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, + pMeterObj->sid, pMeterObj->meterId, ts, sqinfo->numOfRes + 1); + + pData->numOfElems += numOfResult; + sqinfo->numOfRes += numOfResult; + assert(pData->numOfElems <= pRuntimeEnv->numOfRowsPerPage); + + setOutputBufferForIntervalQuery(pSupporter, sqinfo); + + validateResultBuf(pSupporter, sqinfo); + initCtxOutputBuf(pRuntimeEnv); +#if 0 + SSchema sc[TSDB_MAX_COLUMNS] = {0}; + sc[0].type = TSDB_DATA_TYPE_BIGINT; + sc[0].bytes = 8; + + sc[1].type = TSDB_DATA_TYPE_BIGINT; + sc[1].bytes = 8; + + UNUSED(sc); + tColModel *cm = tColModelCreate(sc, pQuery->numOfOutputCols, pRuntimeEnv->numOfRowsPerPage); + +// if (outputPage->numOfElems + numOfResult >= pRuntimeEnv->numOfRowsPerPage) + tColModelDisplay(cm, outputPage->data, outputPage->numOfElems, pRuntimeEnv->numOfRowsPerPage); +#endif + } +} + +static int32_t doCopyFromGroupBuf(SMeterQuerySupportObj *pSupporter, SOutputRes *result, int32_t orderType) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + int32_t numOfResult = 0; + /* pointsToRead is the max number of rows of results for output*/ + + int32_t startIdx = 0; + int32_t forward = 1; + + dTrace("QInfo:%p start to copy data to dest buf", GET_QINFO_ADDR(pSupporter->runtimeEnv.pQuery)); + + if (orderType == TSQL_SO_ASC) { + startIdx = pSupporter->subgroupIdx; + } else { // desc + startIdx = pSupporter->pSidSet->numOfSubSet - pSupporter->subgroupIdx - 1; + forward = -1; + } + + for (int32_t i = startIdx; (i < pSupporter->pSidSet->numOfSubSet) && (i >= 0); i += forward) { + if (result[i].numOfRows == 0) { + pSupporter->offset = 0; + pSupporter->subgroupIdx += 1; + continue; + } + + assert(result[i].numOfRows <= 1 && pSupporter->offset <= 1); + + tFilePage **srcBuf = result[i].result; + + int32_t numOfRowsToCopy = result[i].numOfRows - pSupporter->offset; + int32_t oldOffset = pSupporter->offset; + + if (numOfRowsToCopy > pQuery->pointsToRead - numOfResult) { + /* current output space is not enough for the keep the data of this group + */ + numOfRowsToCopy = pQuery->pointsToRead - numOfResult; + pSupporter->offset += numOfRowsToCopy; + } else { + pSupporter->offset = 0; + pSupporter->subgroupIdx += 1; + } + + for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) { + int32_t elemSize = pRuntimeEnv->pCtx[j].outputBytes; + char * outputBuf = pQuery->sdata[j]->data + numOfResult * elemSize; + + memcpy(outputBuf, srcBuf[j]->data + oldOffset * elemSize, elemSize * numOfRowsToCopy); + } + + numOfResult += numOfRowsToCopy; + if (numOfResult == pQuery->pointsToRead) { + break; + } + } + + dTrace("QInfo:%p done copy data to dst buf", GET_QINFO_ADDR(pSupporter->runtimeEnv.pQuery)); + +#ifdef _DEBUG_VIEW + displayInterResult(pQuery->sdata, pQuery, numOfResult); +#endif + return numOfResult; +} + +/** + * copyFromGroupBuf support copy data in ascending/descending order + * @param pQInfo + * @param result + */ +void copyFromGroupBuf(SQInfo *pQInfo, SOutputRes *result) { + SQuery * pQuery = &pQInfo->query; + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + int32_t orderType = (pQuery->pGroupbyExpr != NULL) ? 
pQuery->pGroupbyExpr->orderType : TSQL_SO_DESC; + + int32_t numOfResult = doCopyFromGroupBuf(pSupporter, result, orderType); + + pQuery->pointsRead += numOfResult; + assert(pQuery->pointsRead <= pQuery->pointsToRead); + + if (pSupporter->subgroupIdx == pSupporter->pSidSet->numOfSubSet) { + /* set the end of query flag */ + pSupporter->meterIdx = pSupporter->numOfMeters; + } +} + +// todo refactor according to its called env!! +static void getAlignedIntervalQueryRange(SQuery *pQuery, TSKEY keyInData, TSKEY skey, TSKEY ekey) { + if (pQuery->nAggTimeInterval == 0) { + return; + } + + doGetAlignedIntervalQueryRange(pQuery, keyInData, skey, ekey); +} + +static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data, + int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus, + SField *pFields, __block_search_fn_t searchFn) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + SMeterQueryInfo * pInfo = pInfoEx->pMeterQInfo; + + /* + * for each block, we need to handle the previous query, since the determination of previous query being completed + * or not is based on the start key of current block. + */ + TSKEY key = getNextAccessedKeyInData(pQuery, pPrimaryData, pBlockInfo, blockStatus); + setIntervalQueryRange(pSupporter, key, pInfoEx); + if (((pQuery->skey > pQuery->ekey) && QUERY_IS_ASC_QUERY(pQuery)) || + ((pQuery->skey < pQuery->ekey) && !QUERY_IS_ASC_QUERY(pQuery))) { + return; + } + + if (((pBlockInfo->keyLast < pQuery->ekey) && QUERY_IS_ASC_QUERY(pQuery)) || + ((pBlockInfo->keyFirst > pQuery->ekey) && !QUERY_IS_ASC_QUERY(pQuery))) { + int32_t numOfRes = 0; + /* current block is included in this interval */ + int32_t steps = applyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, pPrimaryData, data, pFields, searchFn, &numOfRes); + assert(numOfRes <= 1 && numOfRes >= 0 && steps > 0); + + if (pInfoEx->pMeterQInfo->lastResRows == 0) { + pInfoEx->pMeterQInfo->lastResRows = numOfRes; + } else { + assert(pInfoEx->pMeterQInfo->lastResRows == 1); + } + + saveIntervalQueryRange(pQuery, pInfo); + } else { + doApplyIntervalQueryOnBlock(pSupporter, pInfoEx->pMeterQInfo, pBlockInfo, pPrimaryData, data, pFields, searchFn); + } +} + +// we need to split the result into different packages. 
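+// vnodeGetResultSize converts a row count into a byte size (rowSize * numOfRows) so that each package can be sized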
+int32_t vnodeGetResultSize(void *thandle, int32_t *numOfRows) { + SQInfo *pQInfo = (SQInfo *)thandle; + return pQInfo->query.rowSize * (*numOfRows); +} + +int64_t vnodeGetOffsetVal(void *thandle) { + SQInfo *pQInfo = (SQInfo *)thandle; + return pQInfo->query.limit.offset; +} + +bool vnodeHasRemainResults(void *handle) { + SQInfo * pQInfo = (SQInfo *)handle; + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + if (pSupporter == NULL || pQInfo->query.interpoType == TSDB_INTERPO_NONE) { + return false; + } + + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + SInterpolationInfo *pInterpoInfo = &pRuntimeEnv->interpoInfo; + + if (pQuery->limit.limit > 0 && pQInfo->pointsRead >= pQuery->limit.limit) { + return false; + } + + int32_t remain = taosNumOfRemainPoints(pInterpoInfo); + if (remain > 0) { + return true; + } else { + if (pRuntimeEnv->pInterpoBuf == NULL) { + return false; + } + + if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)) { + /* query has completed */ + TSKEY ekey = taosGetRevisedEndKey(pSupporter->rawEKey, pQuery->order.order, pQuery->nAggTimeInterval, + pQuery->intervalTimeUnit); + int32_t numOfTotal = taosGetNumOfResultWithInterpo(pInterpoInfo, (TSKEY *)pRuntimeEnv->pInterpoBuf[0]->data, + remain, pQuery->nAggTimeInterval, ekey, pQuery->pointsToRead); + return numOfTotal > 0; + } + + return false; + } +} + +static int32_t resultInterpolate(SQInfo *pQInfo, tFilePage **data, tFilePage **pDataSrc, int32_t numOfRows, + int32_t outputRows) { + SQuery * pQuery = &pQInfo->query; + SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv; + + assert(pRuntimeEnv->pCtx[0].outputBytes == TSDB_KEYSIZE); + + // build support structure for performing interpolation + SSchema *pSchema = calloc(1, sizeof(SSchema) * pQuery->numOfOutputCols); + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + pSchema[i].bytes = pRuntimeEnv->pCtx[i].outputBytes; + pSchema[i].type = pQuery->pSelectExpr[i].resType; + } + + tColModel *pModel = tColModelCreate(pSchema, pQuery->numOfOutputCols, pQuery->pointsToRead); + + char * srcData[TSDB_MAX_COLUMNS] = {0}; + int32_t functions[TSDB_MAX_COLUMNS] = {0}; + + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + srcData[i] = pDataSrc[i]->data; + functions[i] = pQuery->pSelectExpr[i].pBase.functionId; + } + + int32_t numOfRes = taosDoInterpoResult(&pRuntimeEnv->interpoInfo, pQuery->interpoType, data, numOfRows, outputRows, + pQuery->nAggTimeInterval, (int64_t *)pDataSrc[0]->data, pModel, srcData, + pQuery->defaultVal, functions, pRuntimeEnv->pMeterObj->pointsPerFileBlock); + + tColModelDestroy(pModel); + free(pSchema); + + return numOfRes; +} + +/** + * + * @param handle + * @param data + * @param numOfRows the number of rows that are not returned in current retrieve + * @return + */ +int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows) { + SQInfo *pQInfo = (SQInfo *)handle; + + SMeterObj *pObj = pQInfo->pObj; + SQuery * pQuery = &pQInfo->query; + + assert(pQuery->pSelectExpr != NULL && pQuery->numOfOutputCols > 0); + + // copy data into dst buffer directly + int tnumOfRows = vnodeList[pObj->vnode].cfg.rowsInFileBlock; + char *pData = data; + + /* for metric query, bufIndex always be 0. 
*/ + for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { // pQInfo->bufIndex == 0 + int32_t bytes = pQuery->pSelectExpr[col].resBytes; + + memmove(pData, pQuery->sdata[col]->data + bytes * tnumOfRows * pQInfo->bufIndex, bytes * numOfRows); + pData += bytes * numOfRows; + } + + return numOfRows; +} + +int32_t vnodeQueryResultInterpolate(SQInfo *pQInfo, tFilePage **pDst, tFilePage **pDataSrc, int32_t numOfRows, + int32_t *numOfInterpo) { + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = pRuntimeEnv->pQuery; + + while (1) { + numOfRows = taosNumOfRemainPoints(&pRuntimeEnv->interpoInfo); + + TSKEY ekey = taosGetRevisedEndKey(pSupporter->rawEKey, pQuery->order.order, pQuery->nAggTimeInterval, + pQuery->intervalTimeUnit); + int32_t numOfFinalRows = + taosGetNumOfResultWithInterpo(&pRuntimeEnv->interpoInfo, (TSKEY *)pDataSrc[0]->data, numOfRows, + pQuery->nAggTimeInterval, ekey, pQuery->pointsToRead); + + int32_t ret = resultInterpolate(pQInfo, pDst, pDataSrc, numOfRows, numOfFinalRows); + assert(ret == numOfFinalRows); + + if (pQuery->limit.offset == 0) { + /* reached the start position of according to offset value, return immediately */ + return ret; + } + + if (pQuery->limit.offset < ret) { + ret -= pQuery->limit.offset; + // todo !!!!there exactly number of interpo is not valid. + // todo refactor move to the beginning of buffer + if (QUERY_IS_ASC_QUERY(pQuery)) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].resBytes * pQuery->limit.offset, + ret * pQuery->pSelectExpr[i].resBytes); + } + } else { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memmove(pDst[i]->data + (pQuery->pointsToRead - ret) * pQuery->pSelectExpr[i].resBytes, + pDst[i]->data + (pQuery->pointsToRead - ret - pQuery->limit.offset) * pQuery->pSelectExpr[i].resBytes, + ret * pQuery->pSelectExpr[i].resBytes); + } + } + pQuery->limit.offset = 0; + return ret; + } else { + pQuery->limit.offset -= ret; + ret = 0; + } + + if (!vnodeHasRemainResults(pQInfo)) { + return ret; + } + } +} + +void vnodePrintQueryStatistics(SMeterQuerySupportObj *pSupporter) { + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + + SQuery *pQuery = pRuntimeEnv->pQuery; + SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery); + + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + pSummary->tmpBufferInDisk = pSupporter->bufSize; + + dTrace("QInfo:%p statis: comp blocks:%d, size:%d Bytes, elapsed time:%.2f ms", pQInfo, pSummary->readCompInfo, + pSummary->totalCompInfoSize, pSummary->loadCompInfoUs / 1000.0); + + dTrace("QInfo:%p statis: field info: %d, size:%d Bytes, avg size:%.2f Bytes, elapsed time:%.2f ms", pQInfo, + pSummary->readField, pSummary->totalFieldSize, (double)pSummary->totalFieldSize / pSummary->readField, + pSummary->loadFieldUs / 1000.0); + + dTrace("QInfo:%p statis: file blocks:%d, size:%d Bytes, elapsed time:%.2f ms, skipped:%d, in-memory gen null:%d Bytes", + pQInfo, pSummary->readDiskBlocks, pSummary->totalBlockSize, pSummary->loadBlocksUs / 1000.0, + pSummary->skippedFileBlocks, pSummary->totalGenData); + + dTrace("QInfo:%p statis: cache blocks:%d", pQInfo, pSummary->blocksInCache, 0); + dTrace("QInfo:%p statis: temp file:%d Bytes", pQInfo, pSummary->tmpBufferInDisk); + + dTrace("QInfo:%p statis: file:%d, table:%d", pQInfo, pSummary->numOfFiles, pSummary->numOfTables); + dTrace("QInfo:%p statis: seek ops:%d", pQInfo, 
pSummary->numOfSeek); + + double total = pSummary->fileTimeUs + pSummary->cacheTimeUs; + double io = pSummary->loadCompInfoUs + pSummary->loadBlocksUs + pSummary->loadFieldUs; + // assert(io <= pSummary->fileTimeUs); + + // todo add the intermediate result save cost!! + double computing = total - io; + + dTrace("QInfo:%p statis: total elapsed time:%.2f ms, file:%.2f ms(%.2f%), cache:%.2f ms(%.2f%). io:%.2f ms(%.2f%)," + "comput:%.2fms(%.2f%)", + pQInfo, total / 1000.0, pSummary->fileTimeUs / 1000.0, pSummary->fileTimeUs * 100 / total, + pSummary->cacheTimeUs / 1000.0, pSummary->cacheTimeUs * 100 / total, io / 1000.0, io * 100 / total, + computing / 1000.0, computing * 100 / total); +} diff --git a/src/system/src/vnodeQueryProcess.c b/src/system/src/vnodeQueryProcess.c new file mode 100644 index 000000000000..919b13416f55 --- /dev/null +++ b/src/system/src/vnodeQueryProcess.c @@ -0,0 +1,1136 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +#include "taosmsg.h" +#include "textbuffer.h" +#include "ttime.h" +#include "vnode.h" +#include "vnodeRead.h" +#include "vnodeUtil.h" + +#include "vnodeQueryImpl.h" + +static bool doCheckWithPrevQueryRange(SQInfo *pQInfo, TSKEY nextKey, SMeterDataInfo *pMeterInfo) { + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + SQuery * pQuery = &pQInfo->query; + SMeterObj * pMeterObj = pMeterInfo->pMeterObj; + + /* no data for current query */ + if ((nextKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (nextKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + if (((nextKey > pSupporter->rawEKey) && QUERY_IS_ASC_QUERY(pQuery)) || + ((nextKey < pSupporter->rawEKey) && (!QUERY_IS_ASC_QUERY(pQuery)))) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in block, ignore", pQInfo, pMeterObj->vnode, + pMeterObj->sid, pMeterObj->meterId); + + return false; + } else { // in case of interval query, forward the query range + setIntervalQueryRange(pSupporter, nextKey, pMeterInfo); + } + } + + return true; +} + +static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) { + SQuery * pQuery = &pQInfo->query; + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + SQueryRuntimeEnv * pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv; + + SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; + + SMeterObj *pTempMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid); + assert(pTempMeterObj != NULL); + + __block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeterObj->searchAlgorithm]; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + + dTrace("QInfo:%p start to query data in cache", pQInfo); + int64_t st = taosGetTimestampUs(); + int32_t totalBlocks = 0; + + for (int32_t groupIdx = 0; groupIdx < pSupporter->pSidSet->numOfSubSet; ++groupIdx) { + int32_t start = pSupporter->pSidSet->starterPos[groupIdx]; + int32_t end = pSupporter->pSidSet->starterPos[groupIdx + 1] - 1; + 
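+    // stop scanning immediately if the query has been killed by the client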
+ if (isQueryKilled(pQuery)) { + return pMeterInfo; + } + + for (int32_t k = start; k <= end; ++k) { + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid); + if (pMeterObj == NULL) { + dError("QInfo:%p failed to find meterId:%d, continue", pQInfo, pMeterSidExtInfo[k]->sid); + continue; + } + + pQInfo->pObj = pMeterObj; + pRuntimeEnv->pMeterObj = pMeterObj; + + setMeterQueryInfo(pSupporter, &pMeterInfo[k]); + if (pMeterInfo[k].pMeterObj == NULL) { /* no data in disk for this meter, set its pointer */ + setMeterDataInfo(&pMeterInfo[k], pMeterObj, k, groupIdx); + } + + assert(pMeterInfo[k].meterOrderIdx == k && pMeterObj == pMeterInfo[k].pMeterObj); + + SMeterQueryInfo *pMeterQueryInfo = pMeterInfo[k].pMeterQInfo; + restoreIntervalQueryRange(pQuery, pMeterQueryInfo); + + /* + * Update the query meter column index and the corresponding filter column index + * the original column index info may be inconsistent with current meter in cache. + * + * The stable schema has been changed, but the meter schema, along with the data in cache, + * will not be updated until data with new schema arrive. + */ + vnodeUpdateQueryColumnIndex(pQuery, pMeterObj); + vnodeUpdateFilterColumnIndex(pQuery); + + if (pQuery->nAggTimeInterval == 0) { + if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + dTrace("QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan data in cache. qrange:%lld-%lld, lastKey:%lld", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, + pQuery->lastKey); + + continue; + } + + setExecutionContext(pSupporter, pSupporter->pResult, pMeterInfo[k].meterOrderIdx, pMeterInfo[k].groupIdx); + } else { + setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo); + } + + qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, + pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); + + /* + * find the appropriated start position in cache + * NOTE: (taking ascending order query for example) + * for the specific query range [pQuery->lastKey, pQuery->ekey], there may be no qualified result in cache. + * Therefore, we need the first point that is greater(less) than the pQuery->lastKey, so the boundary check + * should be ignored (the fourth parameter). 
+ */ + TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, true); + if (nextKey < 0) { + qTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in cache, cache blocks:%d, lastKey:%lld", pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->numOfBlocks, pQuery->lastKey); + continue; + } + + // data in this block may be flushed to disk and this block is allocated to other meter + // todo try with remain cache blocks + SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + if (pBlock == NULL) { + continue; + } + + if (!doCheckWithPrevQueryRange(pQInfo, nextKey, &pMeterInfo[k])) { + continue; + } + + SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; + for (int32_t i = 0; i < pCacheInfo->maxBlocks; ++i) { + pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + + // cache block may be flushed to disk, so it is not available, ignore it and try next + if (pBlock == NULL) { + pQuery->slot = (pQuery->slot + step + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; + continue; + } + + TSKEY *primaryKeys = (TSKEY *)pBlock->offset[0]; + // in handling file data block, this query condition is checked during fetching candidate file blocks + if ((primaryKeys[pQuery->pos] > pSupporter->rawEKey && QUERY_IS_ASC_QUERY(pQuery)) || + (primaryKeys[pQuery->pos] < pSupporter->rawEKey && !QUERY_IS_ASC_QUERY(pQuery))) { + break; + } + + /* only record the key on last block */ + SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus); + SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK); + + dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d", + GET_QINFO_ADDR(pQuery), binfo.keyFirst, binfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos, + pRuntimeEnv->blockStatus); + + totalBlocks++; + queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pBlock, &binfo, &pMeterInfo[k], NULL, + searchFn); + + // todo refactor + if ((pQuery->slot == pQuery->currentSlot && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->slot == pQuery->firstSlot && !QUERY_IS_ASC_QUERY(pQuery))) { + break; + } + + // try next cache block + pQuery->slot = (pQuery->slot + step + pCacheInfo->maxBlocks) % pCacheInfo->maxBlocks; + if (QUERY_IS_ASC_QUERY(pQuery)) { + pQuery->pos = 0; + } else { // backwards traverse encounter the cache invalid, abort scan cache. 
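+          // the next cache block is fetched first so that pos can start from its last row; a recycled block ends the cache scan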
+ SCacheBlock *pNextBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot); + if (pNextBlock == NULL) { + break; // todo fix + } else { + pQuery->pos = pNextBlock->numOfPoints - 1; + } + } + } + } + } + + int64_t time = taosGetTimestampUs() - st; + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + + pSummary->blocksInCache += totalBlocks; + pSummary->cacheTimeUs += time; + pSummary->numOfTables = pSupporter->pSidSet->numOfSids; + + dTrace("QInfo:%p complete check %d cache blocks, elapsed time:%.3fms", pQInfo, totalBlocks, time / 1000.0); + + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + + return pMeterInfo; +} + +static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterQuerySupportObj *pSupporter, + SMeterDataInfo *pMeterDataInfo) { + SQuery * pQuery = &pQInfo->query; + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + + SMeterDataBlockInfoEx *pDataBlockInfoEx = NULL; + int32_t nAllocBlocksInfoSize = 0; + + SMeterObj * pTempMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pMeterSidExtInfo[0]->sid); + __block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeter->searchAlgorithm]; + + int32_t vnodeId = pTempMeter->vnode; + dTrace("QInfo:%p start to check data blocks in %d files", pQInfo, pRuntimeEnv->numOfFiles); + + int32_t fid = QUERY_IS_ASC_QUERY(pQuery) ? -1 : INT32_MAX; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + SQueryCostStatistics *pSummary = &pRuntimeEnv->summary; + + int64_t totalBlocks = 0; + int64_t st = taosGetTimestampUs(); + + while (1) { + if (isQueryKilled(pQuery)) { + break; + } + + int32_t fileIdx = vnodeGetVnodeHeaderFileIdx(&fid, pRuntimeEnv, pQuery->order.order); + if (fileIdx < 0) { + // no valid file, abort current search + break; + } + + pRuntimeEnv->startPos.fileId = fid; + pQuery->fileId = fid; + pSummary->numOfFiles++; + + SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIdx]; + char * pHeaderData = pQueryFileInfo->pHeaderFileData; + + int32_t numOfQualifiedMeters = 0; + SMeterDataInfo **pReqMeterDataInfo = vnodeFilterQualifiedMeters( + pQInfo, vnodeId, pQueryFileInfo, pSupporter->pSidSet, pMeterDataInfo, &numOfQualifiedMeters); + dTrace("QInfo:%p file:%s, %d meters qualified", pQInfo, pQueryFileInfo->dataFilePath, numOfQualifiedMeters); + + /* none of meters in query set have pHeaderData in this file, try next file + */ + if (numOfQualifiedMeters == 0) { + fid += step; + tfree(pReqMeterDataInfo); + continue; + } + + uint32_t numOfBlocks = getDataBlocksForMeters(pSupporter, pQuery, pHeaderData, numOfQualifiedMeters, pQueryFileInfo, + pReqMeterDataInfo); + + dTrace("QInfo:%p file:%s, %d meters contains %d blocks to be checked", pQInfo, pQueryFileInfo->dataFilePath, + numOfQualifiedMeters, numOfBlocks); + if (numOfBlocks == 0) { + fid += step; + tfree(pReqMeterDataInfo); + continue; + } + + createDataBlocksInfoEx(pReqMeterDataInfo, numOfQualifiedMeters, &pDataBlockInfoEx, numOfBlocks, + &nAllocBlocksInfoSize, (int64_t)pQInfo); + + dTrace("QInfo:%p start to load %d blocks and check", pQInfo, numOfBlocks); + int64_t TRACE_OUTPUT_BLOCK_CNT = 10000; + int64_t stimeUnit = 0; + int64_t etimeUnit = 0; + + totalBlocks += numOfBlocks; + + // sequentially scan the pHeaderData file + int32_t j = QUERY_IS_ASC_QUERY(pQuery) ? 
0 : numOfBlocks - 1; + + for (; j < numOfBlocks && j >= 0; j += step) { + if (isQueryKilled(pQuery)) { + break; + } + + /* output elapsed time for log every TRACE_OUTPUT_BLOCK_CNT blocks */ + if (j == 0) { + stimeUnit = taosGetTimestampMs(); + } else if ((j % TRACE_OUTPUT_BLOCK_CNT) == 0) { + etimeUnit = taosGetTimestampMs(); + dTrace("QInfo:%p load and check %ld blocks, and continue. elapsed:%ldms", pQInfo, TRACE_OUTPUT_BLOCK_CNT, + etimeUnit - stimeUnit); + stimeUnit = taosGetTimestampMs(); + } + + SMeterDataBlockInfoEx *pInfoEx = &pDataBlockInfoEx[j]; + SMeterDataInfo * pOneMeterDataInfo = pInfoEx->pMeterDataInfo; + SMeterQueryInfo * pMeterQueryInfo = pOneMeterDataInfo->pMeterQInfo; + SMeterObj * pMeterObj = pOneMeterDataInfo->pMeterObj; + + pQInfo->pObj = pMeterObj; + pRuntimeEnv->pMeterObj = pMeterObj; + + restoreIntervalQueryRange(pQuery, pMeterQueryInfo); + + if (pQuery->nAggTimeInterval == 0) { // normal query + if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + qTrace("QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan this data block. qrange:%lld-%lld, " + "lastKey:%lld", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, + pQuery->lastKey); + + continue; + } + setExecutionContext(pSupporter, pSupporter->pResult, pOneMeterDataInfo->meterOrderIdx, + pOneMeterDataInfo->groupIdx); + } else { // interval query + setIntervalQueryExecutionContext(pSupporter, pOneMeterDataInfo->meterOrderIdx, pMeterQueryInfo); + } + + SCompBlock *pBlock = pInfoEx->pBlock.compBlock; + bool ondemandLoad = onDemandLoadDatablock(pQuery, pMeterQueryInfo->queryRangeSet); + int32_t ret = LoadDatablockOnDemand(pBlock, &pInfoEx->pBlock.fields, &pRuntimeEnv->blockStatus, pRuntimeEnv, + fileIdx, pInfoEx->blockIndex, searchFn, ondemandLoad); + if (ret != DISK_DATA_LOADED) { + pSummary->skippedFileBlocks++; + continue; + } + + SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK); + + assert(pQuery->pos >= 0 && pQuery->pos < pBlock->numOfPoints); + TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data; + + if (IS_DATA_BLOCK_LOADED(pRuntimeEnv->blockStatus) && needPrimaryTimestampCol(pQuery, &binfo)) { + TSKEY nextKey = primaryKeys[pQuery->pos]; + if (!doCheckWithPrevQueryRange(pQInfo, nextKey, pOneMeterDataInfo)) { + continue; + } + } else { + // if data block is not loaded, it must be the intermediate blocks + assert((pBlock->keyFirst >= pQuery->lastKey && pBlock->keyLast <= pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (pBlock->keyFirst >= pQuery->ekey && pBlock->keyLast <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery))); + } + + queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pRuntimeEnv->colDataBuffer, &binfo, + pOneMeterDataInfo, pInfoEx->pBlock.fields, searchFn); + } + + tfree(pReqMeterDataInfo); + + // try next file + fid += step; + } + + int64_t time = taosGetTimestampUs() - st; + dTrace("QInfo:%p complete check %d files, %d blocks, elapsed time:%.3fms", pQInfo, pRuntimeEnv->numOfFiles, + totalBlocks, time / 1000.0); + + pSummary->fileTimeUs += time; + pSummary->readDiskBlocks += totalBlocks; + pSummary->numOfTables = pSupporter->pSidSet->numOfSids; + + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + freeMeterBlockInfoEx(pDataBlockInfoEx, nAllocBlocksInfoSize); + return pMeterDataInfo; +} + +static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *dataInCache, int32_t index, + int32_t start) { + 
SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; + SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = &pQInfo->query; + + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); + + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[index]->sid); + if (pMeterObj == NULL) { + dError("QInfo:%p do not find required meter id: %d, all meterObjs id is:", pQInfo, pMeterSidExtInfo[index]->sid); + return false; + } + + vnodeSetTagValueInParam(pSupporter->pSidSet, pRuntimeEnv, pMeterSidExtInfo[index]); + + dTrace("QInfo:%p query on (%d): vid:%d sid:%d meterId:%s, qrange:%lld-%lld", pQInfo, index - start, pMeterObj->vnode, + pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey); + + pQInfo->pObj = pMeterObj; + pQuery->lastKey = pQuery->skey; + pRuntimeEnv->pMeterObj = pMeterObj; + + vnodeCheckIfDataExists(pRuntimeEnv, pMeterObj, dataInDisk, dataInCache); + + if (pQuery->lastKey > pMeterObj->lastKey && QUERY_IS_ASC_QUERY(pQuery)) { + dTrace("QInfo:%p vid:%d sid:%d meterId:%s, qrange:%lld-%lld, nores, %p", pQInfo, pMeterObj->vnode, pMeterObj->sid, + pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery); + return false; + } + + return true; +} + +static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start) { + SQuery * pQuery = &pQInfo->query; + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; + + bool dataInDisk = true; + bool dataInCache = true; + if (!multimeterMultioutputHelper(pQInfo, &dataInDisk, &dataInCache, index, start)) { + return 0; + } + +#if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP + for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) { + resetMMapWindow(&pRuntimeEnv->pHeaderFiles[i]); + } +#endif + SPointInterpoSupporter pointInterpSupporter = {0}; + pointInterpSupporterInit(pQuery, &pointInterpSupporter); + + if (!normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter)) { + pointInterpSupporterDestroy(&pointInterpSupporter); + return 0; + } + + /* + * here we set the value for before and after the specified time into the + * parameter for + * interpolation query + */ + pointInterpSupporterSetData(pQInfo, &pointInterpSupporter); + pointInterpSupporterDestroy(&pointInterpSupporter); + + vnodeScanAllData(pRuntimeEnv); + doFinalizeResult(pRuntimeEnv); + + int64_t numOfRes = getNumOfResult(pRuntimeEnv); + assert(numOfRes == 1 || numOfRes == 0); + + // accumulate the point interpolation result + if (numOfRes > 0) { + pQuery->pointsRead += numOfRes; + forwardCtxOutputBuf(pRuntimeEnv, numOfRes); + } + + return numOfRes; +} + +static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo; + SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; + + SQuery * pQuery = &pQInfo->query; + tSidSet *pSids = pSupporter->pSidSet; + + SMeterObj *pOneMeter = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid); + + resetCtxOutputBuf(pRuntimeEnv); + cleanCtxOutputBuf(pRuntimeEnv); + initCtxOutputBuf(pRuntimeEnv); + + if (isPointInterpoQuery(pQuery)) { + assert(pQuery->limit.offset == 0 && pQuery->limit.limit != 0); + + while (pSupporter->subgroupIdx < pSids->numOfSubSet) { + int32_t start = pSids->starterPos[pSupporter->subgroupIdx]; + int32_t end = pSids->starterPos[pSupporter->subgroupIdx + 1] 
- 1; + + if (isFirstLastRowQuery(pQuery)) { + dTrace("QInfo:%p last_row query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode, + pSids->numOfSubSet, pSupporter->subgroupIdx); + + TSKEY key = -1; + int32_t index = -1; + + // choose the last key for one group + pSupporter->meterIdx = start; + + for (int32_t k = start; k <= end; ++k, pSupporter->meterIdx++) { + if (isQueryKilled(pQuery)) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return; + } + + SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid); + if (pMeterObj != NULL) { + if (key < pMeterObj->lastKey) { + key = pMeterObj->lastKey; + index = k; + } + } + } + + pQuery->skey = key; + pQuery->ekey = key; + pSupporter->rawSKey = key; + pSupporter->rawEKey = key; + + int64_t num = doCheckMetersInGroup(pQInfo, index, start); + assert(num >= 0); + } else { + dTrace("QInfo:%p interp query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode, + pSids->numOfSubSet, pSupporter->subgroupIdx); + + for (int32_t k = start; k <= end; ++k) { + if (isQueryKilled(pQuery)) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return; + } + + pQuery->skey = pSupporter->rawSKey; + pQuery->ekey = pSupporter->rawEKey; + + int64_t res = doCheckMetersInGroup(pQInfo, k, start); + if (res == 1) { + break; + } + } + } + + pSupporter->subgroupIdx++; + + // output buffer is full, return to client + if (pQuery->pointsRead >= pQuery->pointsToRead) { + break; + } + } + } else { + int32_t start = pSids->starterPos[0]; + int32_t end = pSids->starterPos[1] - 1; + + // NOTE: for group by interpolation query, the number of subset may be greater than 1 + assert(pSids->numOfSubSet == 1 && start == 0 && end == pSids->numOfSids - 1 && pSupporter->meterIdx >= start && + pSupporter->meterIdx <= end); + + for (int32_t k = pSupporter->meterIdx; k <= end; ++k, ++pSupporter->meterIdx) { + if (isQueryKilled(pQuery)) { + setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK); + return; + } + + bool dataInDisk = true; + bool dataInCache = true; + if (!multimeterMultioutputHelper(pQInfo, &dataInDisk, &dataInCache, k, start)) { + pQuery->skey = pSupporter->rawSKey; + pQuery->ekey = pSupporter->rawEKey; + continue; + } + +#if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP + for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) { + resetMMapWindow(&pRuntimeEnv->pHeaderFiles[i]); + } +#endif + SPointInterpoSupporter pointInterpSupporter = {0}; + if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter) == false) { + pQuery->skey = pSupporter->rawSKey; + pQuery->ekey = pSupporter->rawEKey; + continue; + } + + if (pQuery->numOfFilterCols == 0 && pQuery->limit.offset > 0) { + forwardQueryStartPosition(pRuntimeEnv); + + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) { + pQuery->skey = pSupporter->rawSKey; + pQuery->ekey = pSupporter->rawEKey; + continue; + } + } + + vnodeUpdateQueryColumnIndex(pQuery, pRuntimeEnv->pMeterObj); + vnodeUpdateFilterColumnIndex(pQuery); + + vnodeScanAllData(pRuntimeEnv); + doFinalizeResult(pRuntimeEnv); + + pQuery->pointsRead = getNumOfResult(pRuntimeEnv); + doSkipResults(pRuntimeEnv); + + // set query completed + if (doRevisedResultsByLimit(pQInfo)) { + pSupporter->meterIdx = pSupporter->pSidSet->numOfSids; + break; + } + + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) { + /* + * query range is identical in terms of all meters involved in query, + * so we need to restore them at the *beginning* of query on each meter, + * not the 
consecutive query on meter on which is aborted due to buffer limitation + * to ensure that, we can reset the query range once query on a meter is completed. + */ + pQuery->skey = pSupporter->rawSKey; + pQuery->ekey = pSupporter->rawEKey; + + if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) { + pSupporter->meterIdx++; + break; + } + } else { + assert(Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)); + + // forward query range + pQuery->skey = pQuery->lastKey; + break; + } + } + } + + pQInfo->pointsRead += pQuery->pointsRead; + pQuery->pointsOffset = pQuery->pointsToRead; + + moveDescOrderResultsToFront(pRuntimeEnv); + + dTrace("QInfo %p vid:%d, numOfMeters:%d, index:%d, numOfGroups:%d, %d points returned, totalRead:%d totalReturn:%d," + "next skey:%lld, offset:%ld", + pQInfo, pOneMeter->vnode, pSupporter->pSidSet->numOfSids, pSupporter->meterIdx, + pSupporter->pSidSet->numOfSubSet, + pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, pQuery->skey, pQuery->limit.offset); +} + +static void doMultiMeterSupplementaryScan(SQInfo *pQInfo) { + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + + SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv; + SQuery * pQuery = &pQInfo->query; + + if (!needSupplementaryScan(pQuery)) { + dTrace("QInfo:%p no need to do supplementary scan, query completed", pQInfo); + return; + } + + SET_SUPPLEMENT_SCAN_FLAG(pRuntimeEnv); + disableFunctForSuppleScanAndSetSortOrder(pRuntimeEnv, pQuery->order.order); + + SWAP(pSupporter->rawSKey, pSupporter->rawEKey); + + for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { + SMeterQueryInfo *pMeterQInfo = pSupporter->pMeterDataInfo[i].pMeterQInfo; + if (pMeterQInfo != NULL) { + pMeterQInfo->skey = pSupporter->rawSKey; + pMeterQInfo->ekey = pSupporter->rawEKey; + pMeterQInfo->lastKey = pMeterQInfo->skey; + pMeterQInfo->queryRangeSet = 0; + + /* previous does not generate any results*/ + if (pMeterQInfo->numOfPages == 0) { + pMeterQInfo->reverseFillRes = 0; + } else { + pMeterQInfo->reverseIndex = pMeterQInfo->numOfRes; + pMeterQInfo->reverseFillRes = 1; + } + } + } + + int64_t st = taosGetTimestampMs(); + if (QUERY_IS_ASC_QUERY(pQuery)) { + pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter, pSupporter->pMeterDataInfo); + pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); + } else { + pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo); + pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter, pSupporter->pMeterDataInfo); + } + + SWAP(pSupporter->rawSKey, pSupporter->rawEKey); + enableFunctForMasterScan(pRuntimeEnv, pQuery->order.order); + SET_MASTER_SCAN_FLAG(pRuntimeEnv); + + int64_t et = taosGetTimestampMs(); + dTrace("QInfo:%p supplementary scan completed, elapsed time: %lldms", pQInfo, et - st); +} + +static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + SQuery * pQuery = &pQInfo->query; + + if (pSupporter->subgroupIdx > 0) { + /* + * if the subgroupIdx > 0, the query process must be completed yet, we only need to + * copy the data into output buffer + */ + if (pQuery->nAggTimeInterval > 0) { + copyResToQueryResultBuf(pSupporter, pQuery); + +#ifdef _DEBUG_VIEW + displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); +#endif + } else { + copyFromGroupBuf(pQInfo, pSupporter->pResult); + } + + pQInfo->pointsRead += pQuery->pointsRead; + + if (pQuery->pointsRead == 0) { + 
vnodePrintQueryStatistics(pSupporter);
+    }
+    dTrace("QInfo:%p points returned:%d, totalRead:%d totalReturn:%d", pQInfo, pQuery->pointsRead, pQInfo->pointsRead,
+           pQInfo->pointsReturned);
+    return;
+  }
+
+  pSupporter->pMeterDataInfo = (SMeterDataInfo *)calloc(1, sizeof(SMeterDataInfo) * pSupporter->numOfMeters);
+  if (pSupporter->pMeterDataInfo == NULL) {
+    dError("QInfo:%p failed to allocate memory, %s", pQInfo, strerror(errno));
+    return;
+  }
+
+  dTrace("QInfo:%p query start, qrange:%lld-%lld, order:%d, group:%d", pQInfo, pSupporter->rawSKey, pSupporter->rawEKey,
+         pQuery->order.order, pSupporter->pSidSet->numOfSubSet);
+
+  dTrace("QInfo:%p main query scan start", pQInfo);
+  int64_t st = taosGetTimestampMs();
+
+  if (QUERY_IS_ASC_QUERY(pQuery)) {  // order: asc
+    pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter, pSupporter->pMeterDataInfo);
+    pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo);
+  } else {  // order: desc
+    pSupporter->pMeterDataInfo = queryOnMultiDataCache(pQInfo, pSupporter->pMeterDataInfo);
+    pSupporter->pMeterDataInfo = queryOnMultiDataFiles(pQInfo, pSupporter, pSupporter->pMeterDataInfo);
+  }
+
+  int64_t et = taosGetTimestampMs();
+  dTrace("QInfo:%p main scan completed, elapsed time: %lldms, supplementary scan start, order:%d", pQInfo, et - st,
+         pQuery->order.order ^ 1);
+
+  doCloseAllOpenedResults(pSupporter);
+  doMultiMeterSupplementaryScan(pQInfo);
+
+  if (isQueryKilled(pQuery)) {
+    dTrace("QInfo:%p query killed, abort", pQInfo);
+    return;
+  }
+
+  if (pQuery->nAggTimeInterval > 0) {
+    assert(pSupporter->subgroupIdx == 0 && pSupporter->numOfGroupResultPages == 0);
+
+    mergeMetersResultToOneGroups(pSupporter);
+    copyResToQueryResultBuf(pSupporter, pQuery);
+
+#ifdef _DEBUG_VIEW
+    displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len);
+#endif
+  } else {  // not an interval query
+    copyFromGroupBuf(pQInfo, pSupporter->pResult);
+  }
+
+  /* handle the limitation of output buffer */
+  // displayInterResult(pQuery->sdata, pQuery, pQuery->pointsRead);
+  pQInfo->pointsRead += pQuery->pointsRead;
+  dTrace("QInfo:%p points returned:%d, totalRead:%d totalReturn:%d", pQInfo, pQuery->pointsRead, pQInfo->pointsRead,
+         pQInfo->pointsReturned);
+}
+
+/*
+ * in each query, this function will be called only once; no retry for further results is needed.
+ *
+ * select count(*)/top(field,k)/avg(field name) from table_name [where ts>now-1a]
+ */
+static void vnodeSingleMeterFixedOutputProcessor(SQInfo *pQInfo) {
+  SQuery *          pQuery = &pQInfo->query;
+  SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv;
+
+  assert(pQuery->slot >= 0 && pQuery->pos >= 0);
+
+  vnodeScanAllData(pRuntimeEnv);
+  doFinalizeResult(pRuntimeEnv);
+
+  if (isQueryKilled(pQuery)) {
+    return;
+  }
+
+  // the numOfOutputElems must be identical for all sql functions that are allowed to be executed simultaneously.
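+  // hence a single getNumOfResult() call is enough to obtain the row count shared by every output column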
+ pQuery->pointsRead = getNumOfResult(pRuntimeEnv); + assert(pQuery->pointsRead <= pQuery->pointsToRead && + Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)); + + // must be top/bottom query if offset > 0 + if (pQuery->limit.offset > 0) { + assert(isTopBottomQuery(pQuery)); + } + + doSkipResults(pRuntimeEnv); + doRevisedResultsByLimit(pQInfo); + moveDescOrderResultsToFront(pRuntimeEnv); + + pQInfo->pointsRead = pQuery->pointsRead; +} + +static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) { + SQuery * pQuery = &pQInfo->query; + SMeterObj *pMeterObj = pQInfo->pObj; + + SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv; + + while (1) { + vnodeScanAllData(pRuntimeEnv); + doFinalizeResult(pRuntimeEnv); + + if (isQueryKilled(pQuery)) { + return; + } + + pQuery->pointsRead = getNumOfResult(pRuntimeEnv); + if (pQuery->limit.offset > 0 && pQuery->numOfFilterCols > 0 && pQuery->pointsRead > 0) { + doSkipResults(pRuntimeEnv); + } + + /* + * 1. if pQuery->pointsRead == 0, pQuery->limit.offset >= 0, still need to check data + * 2. if pQuery->pointsRead > 0, pQuery->limit.offset must be 0 + */ + if (pQuery->pointsRead > 0 || Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)) { + break; + } + + TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); + assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK))); + + dTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%lld, next qrange:%lld-%lld", pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->limit.offset, pQuery->lastKey, pQuery->ekey); + + resetCtxOutputBuf(pRuntimeEnv); + cleanCtxOutputBuf(pRuntimeEnv); + } + + doRevisedResultsByLimit(pQInfo); + moveDescOrderResultsToFront(pRuntimeEnv); + + pQInfo->pointsRead += pQuery->pointsRead; + + if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) { + TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); + assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK))); + + dTrace("QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%lld-%lld", pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->lastKey, pQuery->ekey); + } + + dTrace("QInfo:%p vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d", pQInfo, pMeterObj->vnode, + pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned); + + resetCtxOutputBuf(pRuntimeEnv); + + pQuery->pointsOffset = pQuery->pointsToRead; // restore the available buffer + assert(pQuery->pointsRead <= pQuery->pointsToRead); +} + +static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter, SQueryRuntimeEnv *pRuntimeEnv) { + SQuery *pQuery = pRuntimeEnv->pQuery; + + while (1) { + assert((pQuery->skey <= pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (pQuery->skey >= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))); + + initCtxOutputBuf(pRuntimeEnv); + vnodeScanAllData(pRuntimeEnv); + if (isQueryKilled(pQuery)) { + return; + } + + assert(!Q_STATUS_EQUAL(pQuery->over, QUERY_NOT_COMPLETED)); + + // clear tag, used to decide if the whole interval query is completed or not + pQuery->over &= (~QUERY_COMPLETED); + doFinalizeResult(pRuntimeEnv); + + int64_t maxOutput = getNumOfResult(pRuntimeEnv); + + /* here we can ignore the records in case of no interpolation */ + if (pQuery->numOfFilterCols > 0 && 
pQuery->limit.offset > 0 && pQuery->interpoType == TSDB_INTERPO_NONE) { + /* maxOutput <= 0, means current query does not generate any results */ + // todo handle offset, in case of top/bottom interval query + if (maxOutput > 0) { + pQuery->limit.offset--; + } + } else { + pQuery->pointsRead += maxOutput; + forwardCtxOutputBuf(pRuntimeEnv, maxOutput); + } + + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) { + break; + } + + forwardIntervalQueryRange(pSupporter, pRuntimeEnv); + if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED)) { + break; + } + + /* + * the scan limitation mechanism is upon here, + * 1. since there is only one(k) record is generated in one scan operation + * 2. remain space is not sufficient for next query output, abort + */ + if ((pQuery->pointsRead % pQuery->pointsToRead == 0 && pQuery->pointsRead != 0) || + ((pQuery->pointsRead + maxOutput) > pQuery->pointsToRead)) { + setQueryStatus(pQuery, QUERY_RESBUF_FULL); + break; + } + } +} + +/* handle time interval query on single table */ +static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) { + SQuery * pQuery = &(pQInfo->query); + SMeterObj *pMeterObj = pQInfo->pObj; + + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + SQueryRuntimeEnv * pRuntimeEnv = &pSupporter->runtimeEnv; + + int32_t numOfInterpo = 0; + + while (1) { + resetCtxOutputBuf(pRuntimeEnv); + vnodeSingleMeterIntervalMainLooper(pSupporter, pRuntimeEnv); + + // the offset is handled at prepare stage if no interpolation involved + if (pQuery->interpoType == TSDB_INTERPO_NONE) { + doRevisedResultsByLimit(pQInfo); + break; + } else { + taosInterpoSetStartInfo(&pRuntimeEnv->interpoInfo, pQuery->pointsRead, pQuery->interpoType); + SData **pInterpoBuf = pRuntimeEnv->pInterpoBuf; + + if (QUERY_IS_ASC_QUERY(pQuery)) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memcpy(pInterpoBuf[i]->data, pQuery->sdata[i]->data, pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); + } + } else { + int32_t size = pMeterObj->pointsPerFileBlock; + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + memcpy(pInterpoBuf[i]->data, + pQuery->sdata[i]->data + (size - pQuery->pointsRead) * pQuery->pSelectExpr[i].resBytes, + pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes); + } + } + + numOfInterpo = 0; + pQuery->pointsRead = vnodeQueryResultInterpolate(pQInfo, (tFilePage **)pQuery->sdata, (tFilePage **)pInterpoBuf, + pQuery->pointsRead, &numOfInterpo); + + dTrace("QInfo: %p interpo completed, final:%d", pQInfo, pQuery->pointsRead); + if (pQuery->pointsRead > 0 || Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)) { + doRevisedResultsByLimit(pQInfo); + break; + } + + // no result generated yet, continue retrieve data + pQuery->pointsRead = 0; + } + } + + pQInfo->pointsRead += pQuery->pointsRead; + pQInfo->pointsInterpo += numOfInterpo; + + moveDescOrderResultsToFront(pRuntimeEnv); + + dTrace("%p vid:%d sid:%d id:%s, %d points returned %d points interpo, totalRead:%d totalInterpo:%d totalReturn:%d", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, numOfInterpo, + pQInfo->pointsRead - pQInfo->pointsInterpo, pQInfo->pointsInterpo, pQInfo->pointsReturned); +} + +void vnodeSingleMeterQuery(SSchedMsg *pMsg) { + SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; + + if (pQInfo == NULL || pQInfo->pMeterQuerySupporter == NULL) { + dTrace("%p freed abort query", pQInfo); + return; + } + + if (pQInfo->killed) { + TSDB_QINFO_RESET_SIG(pQInfo); + dTrace("QInfo:%p it is already killed, reset 
signature and abort", pQInfo); + return; + } + + assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); + + SQuery * pQuery = &pQInfo->query; + SMeterObj *pMeterObj = pQInfo->pObj; + + dTrace("vid:%d sid:%d id:%s, query thread is created, numOfQueries:%d, QInfo:%p", pMeterObj->vnode, pMeterObj->sid, + pMeterObj->meterId, pMeterObj->numOfQueries, pQInfo); + + SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv; + assert(pQuery->pGroupbyExpr == NULL && pRuntimeEnv->pMeterObj == pMeterObj); + + if (vnodeHasRemainResults(pQInfo)) { + /* + * there are remain results that are not returned due to result + * interpolation + * So, we do keep in this procedure instead of launching retrieve procedure + */ + int32_t numOfInterpo = 0; + + int32_t remain = taosNumOfRemainPoints(&pRuntimeEnv->interpoInfo); + pQuery->pointsRead = vnodeQueryResultInterpolate(pQInfo, (tFilePage **)pQuery->sdata, + (tFilePage **)pRuntimeEnv->pInterpoBuf, remain, &numOfInterpo); + + doRevisedResultsByLimit(pQInfo); + moveDescOrderResultsToFront(pRuntimeEnv); + + pQInfo->pointsInterpo += numOfInterpo; + pQInfo->pointsRead += pQuery->pointsRead; + + dTrace("QInfo:%p vid:%d sid:%d id:%s, %d points returned %d points interpo, totalRead:%d totalInterpo:%d totalReturn:%d", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, numOfInterpo, + pQInfo->pointsRead, pQInfo->pointsInterpo, pQInfo->pointsReturned); + + dTrace("QInfo:%p reset signature", pQInfo); + + TSDB_QINFO_RESET_SIG(pQInfo); + sem_post(&pQInfo->dataReady); + + return; + } + + /* here we have scan all qualified data in both data file and cache. */ + if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) { + pQInfo->over = 1; + dTrace("QInfo:%p vid:%d sid:%d id:%s, query over, %d points are returned, reset signature", pQInfo, + pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQInfo->pointsRead); + + vnodePrintQueryStatistics(pQInfo->pMeterQuerySupporter); + TSDB_QINFO_RESET_SIG(pQInfo); + sem_post(&pQInfo->dataReady); + + return; + } + + /* number of points returned during this query */ + pQuery->pointsRead = 0; + assert(pQuery->pos >= 0 && pQuery->slot >= 0); + + int64_t st = taosGetTimestampUs(); + + if (pQuery->nAggTimeInterval != 0) { // interval (downsampling operation) + assert(pQuery->nAggTimeInterval != 0 && pQuery->checkBufferInLoop == 0 && + pQuery->pointsOffset == pQuery->pointsToRead); + vnodeSingleMeterIntervalProcessor(pQInfo); + } else { + if (isFixedOutputQuery(pQuery)) { + assert(pQuery->checkBufferInLoop == 0 && pQuery->nAggTimeInterval == 0); + vnodeSingleMeterFixedOutputProcessor(pQInfo); + } else { // diff/add/multiply/subtract/division + assert(pQuery->checkBufferInLoop == 1 && pQuery->nAggTimeInterval == 0); + vnodeSingleMeterMultiOutputProcessor(pQInfo); + } + } + + /* record the total elapsed time */ + pQInfo->useconds += (taosGetTimestampUs() - st); + + /* check if query is killed or not */ + if (isQueryKilled(pQuery)) { + dTrace("QInfo:%p query is killed, reset signature", pQInfo); + pQInfo->over = 1; + } else { + dTrace("QInfo:%p vid:%d sid:%d id:%s, meter query thread completed, %d points are returned, reset signature", + pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead); + } + + TSDB_QINFO_RESET_SIG(pQInfo); + sem_post(&pQInfo->dataReady); +} + +void vnodeMultiMeterQuery(SSchedMsg *pMsg) { + SQInfo *pQInfo = (SQInfo *)pMsg->ahandle; + + if (pQInfo == NULL || pQInfo->pMeterQuerySupporter == NULL) { + return; + } + + if (pQInfo->killed) 
{ + TSDB_QINFO_RESET_SIG(pQInfo); + dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + return; + } + + assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); + + SQuery *pQuery = &pQInfo->query; + pQuery->pointsRead = 0; + + SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter; + if (pSupporter->meterIdx >= pSupporter->pSidSet->numOfSids) { + pQInfo->over = 1; + dTrace("QInfo:%p over, %d meters queried, %d points are returned, reset signature", pQInfo, pSupporter->numOfMeters, + pQInfo->pointsRead); + + // reset status + TSDB_QINFO_RESET_SIG(pQInfo); + + vnodePrintQueryStatistics(pSupporter); + sem_post(&pQInfo->dataReady); + return; + } + + int64_t st = taosGetTimestampUs(); + if (pQuery->nAggTimeInterval > 0 || (isFixedOutputQuery(pQuery) && (!isPointInterpoQuery(pQuery)))) { + assert(pQuery->checkBufferInLoop == 0); + vnodeMultiMeterQueryProcessor(pQInfo); + } else { + assert((pQuery->checkBufferInLoop == 1 && pQuery->nAggTimeInterval == 0 && pQuery->pGroupbyExpr == NULL) || + isPointInterpoQuery(pQuery)); + vnodeMultiMeterMultiOutputProcessor(pQInfo); + } + + /* record the total elapsed time */ + pQInfo->useconds += (taosGetTimestampUs() - st); + pQInfo->over = isQueryKilled(pQuery) ? 1 : 0; + + dTrace("QInfo:%p reset signature", pQInfo); + taosInterpoSetStartInfo(&pQInfo->pMeterQuerySupporter->runtimeEnv.interpoInfo, pQuery->pointsRead, + pQInfo->query.interpoType); + + TSDB_QINFO_RESET_SIG(pQInfo); + sem_post(&pQInfo->dataReady); +} diff --git a/src/system/src/vnodeRead.c b/src/system/src/vnodeRead.c new file mode 100644 index 000000000000..aee4580da73a --- /dev/null +++ b/src/system/src/vnodeRead.c @@ -0,0 +1,1065 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include "ihash.h" +#include "taosmsg.h" +#include "tast.h" +#include "textbuffer.h" +#include "vnode.h" +#include "vnodeRead.h" +#include "vnodeUtil.h" + +int (*pQueryFunc[])(SMeterObj *, SQuery *) = {vnodeQueryFromCache, vnodeQueryFromFile}; + +int vnodeInterpolationSearchKey(char *pValue, int num, TSKEY key, int order) { + int firstPos, lastPos, midPos = -1; + int delta, numOfPoints; + TSKEY *keyList; + + keyList = (TSKEY *)pValue; + firstPos = 0; + lastPos = num - 1; + + if (order == 0) { + // from latest to oldest + while (1) { + if (key >= keyList[lastPos]) return lastPos; + if (key == keyList[firstPos]) return firstPos; + if (key < keyList[firstPos]) return firstPos - 1; + + numOfPoints = lastPos - firstPos + 1; + delta = keyList[lastPos] - keyList[firstPos]; + midPos = (key - keyList[firstPos]) / delta * numOfPoints + firstPos; + + if (key < keyList[midPos]) { + lastPos = midPos - 1; + } else if (key > keyList[midPos]) { + firstPos = midPos + 1; + } else { + break; + } + } + + } else { + // from oldest to latest + while (1) { + if (key <= keyList[firstPos]) return firstPos; + if (key == keyList[lastPos]) return lastPos; + + if (key > keyList[lastPos]) { + lastPos = lastPos + 1; + if (lastPos >= num) return -1; + } + + numOfPoints = lastPos - firstPos + 1; + delta = keyList[lastPos] - keyList[firstPos]; + midPos = (key - keyList[firstPos]) / delta * numOfPoints + firstPos; + + if (key < keyList[midPos]) { + lastPos = midPos - 1; + } else if (key > keyList[midPos]) { + firstPos = midPos + 1; + } else { + break; + } + } + } + + return midPos; +} + +int vnodeBinarySearchKey(char *pValue, int num, TSKEY key, int order) { + int firstPos, lastPos, midPos = -1; + int numOfPoints; + TSKEY *keyList; + + if (num <= 0) return -1; + + keyList = (TSKEY *)pValue; + firstPos = 0; + lastPos = num - 1; + + if (order == 0) { + // find the first position which is smaller than the key + while (1) { + if (key >= keyList[lastPos]) return lastPos; + if (key == keyList[firstPos]) return firstPos; + if (key < keyList[firstPos]) return firstPos - 1; + + numOfPoints = lastPos - firstPos + 1; + midPos = (numOfPoints >> 1) + firstPos; + + if (key < keyList[midPos]) { + lastPos = midPos - 1; + } else if (key > keyList[midPos]) { + firstPos = midPos + 1; + } else { + break; + } + } + + } else { + // find the first position which is bigger than the key + while (1) { + if (key <= keyList[firstPos]) return firstPos; + if (key == keyList[lastPos]) return lastPos; + + if (key > keyList[lastPos]) { + lastPos = lastPos + 1; + if (lastPos >= num) + return -1; + else + return lastPos; + } + + numOfPoints = lastPos - firstPos + 1; + midPos = (numOfPoints >> 1) + firstPos; + + if (key < keyList[midPos]) { + lastPos = midPos - 1; + } else if (key > keyList[midPos]) { + firstPos = midPos + 1; + } else { + break; + } + } + } + + return midPos; +} + +int (*vnodeSearchKeyFunc[])(char *pValue, int num, TSKEY key, int order) = {vnodeBinarySearchKey, + vnodeInterpolationSearchKey}; + +static SQInfo *vnodeAllocateQInfoCommon(SQueryMeterMsg *pQueryMsg, SMeterObj *pMeterObj, SSqlFunctionExpr *pExprs) { + SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo)); + if (pQInfo == NULL) { + return NULL; + } + + SQuery *pQuery = &(pQInfo->query); + + SColumnFilterMsg *colList = pQueryMsg->colList; + + short numOfCols = pQueryMsg->numOfCols; + short numOfOutputCols = pQueryMsg->numOfOutputCols; + + pQuery->numOfCols = numOfCols; + pQuery->numOfOutputCols = numOfOutputCols; + + 
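+  // copy the per-query constraints (limit/offset, ordering) and the column list out of the
+  // shell message; the column filters derived from them are created further below.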
pQuery->limit.limit = pQueryMsg->limit; + pQuery->limit.offset = pQueryMsg->offset; + + pQuery->order.order = pQueryMsg->order; + pQuery->order.orderColId = pQueryMsg->orderColId; + + pQuery->colList = calloc(1, sizeof(SColumnFilter) * numOfCols); + if (pQuery->colList == NULL) { + goto _clean_memory; + } + + for (int16_t i = 0; i < numOfCols; ++i) { + pQuery->colList[i].req[0] = 1; // column required during mater scan of data blocks + pQuery->colList[i].colIdxInBuf = i; + pQuery->colList[i].data = colList[i]; + pQuery->dataRowSize += colList[i].bytes; + } + + vnodeUpdateQueryColumnIndex(pQuery, pMeterObj); + + for (int16_t col = 0; col < numOfOutputCols; ++col) { + assert(pExprs[col].resBytes > 0); + + pQuery->rowSize += pExprs[col].resBytes; + if (pExprs[col].pBase.colInfo.isTag) { + continue; + } + + int16_t colId = pExprs[col].pBase.colInfo.colId; + int16_t functId = pExprs[col].pBase.functionId; + + // build the projection of actual column data in buffer and the real column + // index + for (int32_t k = 0; k < numOfCols; ++k) { + if (pQuery->colList[k].data.colId == colId) { + pExprs[col].pBase.colInfo.colIdxInBuf = (int16_t)k; + pExprs[col].pBase.colInfo.colIdx = pQuery->colList[k].colIdx; + + if (((functId == TSDB_FUNC_FIRST_DST || functId == TSDB_FUNC_FIRST) && pQuery->order.order == TSQL_SO_DESC) || + ((functId == TSDB_FUNC_LAST_DST || functId == TSDB_FUNC_LAST) && pQuery->order.order == TSQL_SO_ASC)) { + pQuery->colList[k].req[1] = 1; + } else if (functId == TSDB_FUNC_STDDEV) { + pQuery->colList[k].req[1] = 1; + } + break; + } + } + } + + pQuery->pSelectExpr = pExprs; + + int32_t ret = vnodeCreateFilterInfo(pQuery); + if (ret != TSDB_CODE_SUCCESS) { + goto _clean_memory; + } + + vnodeUpdateFilterColumnIndex(pQuery); + return pQInfo; + +_clean_memory: + tfree(pQuery->pFilterInfo); + tfree(pQuery->colList); + tfree(pQInfo); + + return NULL; +} + +static SQInfo *vnodeAllocateQInfoEx(SQueryMeterMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pExprs, + SMeterObj *pMeterObj) { + SQInfo *pQInfo = vnodeAllocateQInfoCommon(pQueryMsg, pMeterObj, pExprs); + if (pQInfo == NULL) { + tfree(pExprs); + tfree(pGroupbyExpr); + + return NULL; + } + + SQuery *pQuery = &(pQInfo->query); + + /* pQuery->sdata is the results output buffer. 
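+   * Each output column gets its own SData block below, sized as a header plus
+   * (pointsToRead + 1) rows of resBytes, i.e. one spare row of head-room.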
*/ + pQuery->sdata = (SData **)calloc(pQuery->numOfOutputCols, sizeof(SData *)); + if (pQuery->sdata == NULL) { + goto sign_clean_memory; + } + + pQuery->pGroupbyExpr = pGroupbyExpr; + pQuery->nAggTimeInterval = pQueryMsg->nAggTimeInterval; + pQuery->interpoType = pQueryMsg->interpoType; + pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit; + + pQInfo->query.pointsToRead = vnodeList[pMeterObj->vnode].cfg.rowsInFileBlock; + + for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) { + size_t size = (pQInfo->query.pointsToRead + 1) * pExprs[col].resBytes + sizeof(SData); + pQuery->sdata[col] = (SData *)calloc(1, size); + if (pQuery->sdata[col] == NULL) { + goto sign_clean_memory; + } + } + + if (pQuery->interpoType != TSDB_INTERPO_NONE) { + pQuery->defaultVal = malloc(sizeof(int64_t) * pQuery->numOfOutputCols); + if (pQuery->defaultVal == NULL) { + goto sign_clean_memory; + } + + // the first column is the timestamp + memcpy(pQuery->defaultVal, (char *)pQueryMsg->defaultVal, pQuery->numOfOutputCols * sizeof(int64_t)); + } + + // to make sure third party won't overwrite this structure + pQInfo->signature = (uint64_t)pQInfo; + pQInfo->pObj = pMeterObj; + pQuery->slot = -1; + pQuery->pos = -1; + pQuery->hfd = -1; + pQuery->dfd = -1; + pQuery->lfd = -1; + + dTrace("vid:%d sid:%d meterId:%s, QInfo is allocated:%p", pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, + pQInfo); + + return pQInfo; + +sign_clean_memory: + tfree(pQuery->defaultVal); + + if (pQuery->sdata != NULL) { + for (int16_t col = 0; col < pQuery->numOfOutputCols; ++col) { + tfree(pQuery->sdata[col]); + } + } + + tfree(pQuery->sdata); + tfree(pQuery->pFilterInfo); + tfree(pQuery->colList); + + tfree(pExprs); + tfree(pGroupbyExpr); + + tfree(pQInfo); + + return NULL; +} + +SQInfo *vnodeAllocateQInfo(SQueryMeterMsg *pQueryMsg, SMeterObj *pObj, SSqlFunctionExpr *pExprs) { + SQInfo *pQInfo = vnodeAllocateQInfoCommon(pQueryMsg, pObj, pExprs); + if (pQInfo == NULL) { + tfree(pExprs); + return NULL; + } + + SQuery *pQuery = &(pQInfo->query); + + pQuery->sdata = (SData **)malloc(sizeof(SData *) * pQuery->numOfOutputCols); + if (pQuery->sdata == NULL) { + goto __clean_memory; + } + + size_t size = 0; + int32_t numOfRows = vnodeList[pObj->vnode].cfg.rowsInFileBlock; + for (int col = 0; col < pQuery->numOfOutputCols; ++col) { + size = 2 * (numOfRows * pQuery->pSelectExpr[col].resBytes + sizeof(SData)); + pQuery->sdata[col] = (SData *)malloc(size); + if (pQuery->sdata[col] == NULL) { + goto __clean_memory; + } + } + + if (pQuery->colList[0].data.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + size = 2 * (numOfRows * TSDB_KEYSIZE + sizeof(SData)); + pQuery->tsData = (SData *)malloc(size); + if (pQuery->tsData == NULL) { + goto __clean_memory; + } + } + + // to make sure third party won't overwrite this structure + pQInfo->signature = (uint64_t)pQInfo; + pQInfo->pObj = pObj; + pQuery->slot = -1; + pQuery->hfd = -1; + pQuery->dfd = -1; + pQuery->lfd = -1; + pQuery->pos = -1; + pQuery->interpoType = TSDB_INTERPO_NONE; + + dTrace("vid:%d sid:%d meterId:%s, QInfo is allocated:%p", pObj->vnode, pObj->sid, pObj->meterId, pQInfo); + return pQInfo; + +__clean_memory: + + tfree(pQuery->tsData); + if (pQuery->sdata != NULL) { + for (int col = 0; col < pQuery->numOfOutputCols; ++col) { + tfree(pQuery->sdata[col]); + } + } + tfree(pQuery->sdata); + tfree(pQuery->pFilterInfo); + tfree(pQuery->colList); + + tfree(pExprs); + + tfree(pQInfo); + + return NULL; +} + +static void vnodeFreeQInfoInQueueImpl(SSchedMsg *pMsg) { + SQInfo *pQInfo = (SQInfo 
*)pMsg->ahandle; + vnodeFreeQInfo(pQInfo, true); +} + +void vnodeFreeQInfoInQueue(void *param) { + SQInfo *pQInfo = (SQInfo *)param; + + if (!vnodeIsQInfoValid(pQInfo)) return; + + pQInfo->killed = 1; + + dTrace("QInfo:%p set kill flag and add to queue, stop query ASAP", pQInfo); + SSchedMsg schedMsg = {0}; + schedMsg.fp = vnodeFreeQInfoInQueueImpl; + + schedMsg.msg = NULL; + schedMsg.thandle = (void *)1; + schedMsg.ahandle = param; + taosScheduleTask(queryQhandle, &schedMsg); +} + +void vnodeFreeQInfo(void *param, bool decQueryRef) { + SQInfo *pQInfo = (SQInfo *)param; + if (!vnodeIsQInfoValid(param)) return; + + pQInfo->killed = 1; + TSDB_WAIT_TO_SAFE_DROP_QINFO(pQInfo); + + SMeterObj *pObj = pQInfo->pObj; + dTrace("QInfo:%p start to free SQInfo", pQInfo); + + if (decQueryRef) { + vnodeDecMeterRefcnt(pQInfo); + } + + SQuery *pQuery = &(pQInfo->query); + tclose(pQuery->hfd); + tclose(pQuery->dfd); + tclose(pQuery->lfd); + + vnodeFreeFields(pQuery); + + tfree(pQuery->pBlock); + + for (int col = 0; col < pQuery->numOfOutputCols; ++col) { + tfree(pQuery->sdata[col]); + } + + for (int col = 0; col < pQuery->numOfCols; ++col) { + if (pQuery->colList[col].data.filterOnBinary == 1 && pQuery->colList[col].data.filterOn) { + tfree(pQuery->colList[col].data.pz); + pQuery->colList[col].data.len = 0; + } + } + + if (pQuery->colList[0].colIdx != PRIMARYKEY_TIMESTAMP_COL_INDEX) { + tfree(pQuery->tsData); + } + + sem_destroy(&(pQInfo->dataReady)); + vnodeQueryFreeQInfoEx(pQInfo); + + tfree(pQuery->pFilterInfo); + tfree(pQuery->colList); + tfree(pQuery->sdata); + + if (pQuery->pSelectExpr != NULL) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + SSqlBinaryExprInfo *pBinExprInfo = &pQuery->pSelectExpr[i].pBinExprInfo; + + if (pBinExprInfo->numOfCols > 0) { + tfree(pBinExprInfo->pReqColumns); + tSQLBinaryExprDestroy(&pBinExprInfo->pBinExpr); + } + } + + tfree(pQuery->pSelectExpr); + } + + if (pQuery->defaultVal != NULL) { + tfree(pQuery->defaultVal); + } + + tfree(pQuery->pGroupbyExpr); + + dTrace("QInfo:%p vid:%d sid:%d meterId:%s, QInfo is freed", pQInfo, pObj->vnode, pObj->sid, pObj->meterId); + + /* + * destory signature, in order to avoid the query process pass the object + * safety check + */ + memset(pQInfo, 0, sizeof(SQInfo)); + tfree(pQInfo); +} + +bool vnodeIsQInfoValid(void *param) { + SQInfo *pQInfo = (SQInfo *)param; + if (pQInfo == NULL) { + return false; + } + + /* + * pQInfo->signature may be changed by another thread, so we assign value of signature + * into local variable, then compare by using local variable + */ + uint64_t sig = pQInfo->signature; + return (sig == (uint64_t)pQInfo) || (sig == TSDB_QINFO_QUERY_FLAG); +} + +void vnodeQueryData(SSchedMsg *pMsg) { + SQuery *pQuery; + SQInfo *pQInfo; + + pQInfo = (SQInfo *)pMsg->ahandle; + + if (pQInfo->killed) { + TSDB_QINFO_RESET_SIG(pQInfo); + dTrace("QInfo:%p it is already killed, reset signature and abort", pQInfo); + return; + } + + assert(pQInfo->signature == TSDB_QINFO_QUERY_FLAG); + pQuery = &(pQInfo->query); + + SMeterObj *pObj = pQInfo->pObj; + + dTrace("QInfo:%p vid:%d sid:%d id:%s, query thread is created, numOfQueries:%d", pQInfo, pObj->vnode, pObj->sid, + pObj->meterId, pObj->numOfQueries); + + pQuery->pointsToRead = vnodeList[pObj->vnode].cfg.rowsInFileBlock; + pQuery->pointsOffset = pQInfo->bufIndex * pQuery->pointsToRead; + + // dTrace("id:%s, start to query data", pQInfo->pObj->meterId); + int64_t st = taosGetTimestampUs(); + + while (1) { + int64_t potentNumOfRes = pQInfo->pointsRead + pQuery->pointsToRead; + 
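+    // worst case for this pass: rows already delivered plus one more full batch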
/* limit the potential overflow data */ + if (pQuery->limit.limit > 0 && potentNumOfRes > pQuery->limit.limit) { + pQuery->pointsToRead = pQuery->limit.limit - pQInfo->pointsRead; + + if (pQuery->pointsToRead == 0) { + /* reach the limitation, abort */ + pQuery->pointsRead = 0; + pQInfo->over = 1; + break; + } + } + + pQInfo->code = (*pQInfo->fp)(pObj, pQuery); // <0:error + + // has read at least one point + if (pQuery->pointsRead > 0 || pQInfo->code < 0) break; + + if (pQuery->pointsRead == 0 && pQuery->over == 0) continue; + + if (pQInfo->changed) { + pQInfo->over = 1; + break; + } + + // has read all data in file, check data in cache + pQInfo->fp = pQueryFunc[pQuery->order.order ^ 1]; + pQInfo->changed = 1; + + pQuery->slot = -1; // reset the handle + pQuery->over = 0; + + dTrace("vid:%d sid:%d id:%s, query in other media, order:%d, skey:%lld query:%p", + pObj->vnode, pObj->sid, pObj->meterId, pQuery->order.order, pQuery->skey, pQuery); + } + + pQInfo->pointsRead += pQuery->pointsRead; + + dTrace("vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d last key:%lld, query:%p", + pObj->vnode, pObj->sid, pObj->meterId, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, + pQuery->lastKey, pQuery); + + int64_t et = taosGetTimestampUs(); + pQInfo->useconds += et - st; + + // close FDs as soon as possible + if (pQInfo->over) { + dTrace("vid:%d sid:%d id:%s, query over, %d points are returned", pObj->vnode, pObj->sid, pObj->meterId, + pQInfo->pointsRead); + tclose(pQInfo->query.hfd); + tclose(pQInfo->query.dfd); + tclose(pQInfo->query.lfd); + } + + /* reset QInfo signature */ + dTrace("QInfo:%p reset signature", pQInfo); + TSDB_QINFO_RESET_SIG(pQInfo); + sem_post(&pQInfo->dataReady); +} + +void *vnodeQueryInTimeRange(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, + SQueryMeterMsg *pQueryMsg, int32_t *code) { + SQInfo *pQInfo; + SQuery *pQuery; + + SMeterObj *pMeterObj = pMetersObj[0]; + bool isProjQuery = vnodeIsProjectionQuery(pSqlExprs, pQueryMsg->numOfOutputCols); + + if (isProjQuery) { + pQInfo = vnodeAllocateQInfo(pQueryMsg, pMeterObj, pSqlExprs); + } else { + pQInfo = vnodeAllocateQInfoEx(pQueryMsg, pGroupbyExpr, pSqlExprs, pMetersObj[0]); + } + + if (pQInfo == NULL) { + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _error; + } + + pQuery = &(pQInfo->query); + dTrace("qmsg:%p create QInfo:%p, QInfo created", pQueryMsg, pQInfo); + + pQuery->order.order = pQueryMsg->order; + pQuery->skey = pQueryMsg->skey; + pQuery->ekey = pQueryMsg->ekey; + + pQuery->lastKey = pQuery->skey; + + pQInfo->fp = pQueryFunc[pQueryMsg->order]; + pQInfo->num = pQueryMsg->num; + + if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) { + dError("QInfo:%p vid:%d sid:%d meterId:%s, init dataReady sem failed, reason:%s", pQInfo, pMeterObj->vnode, + pMeterObj->sid, pMeterObj->meterId, strerror(errno)); + *code = TSDB_CODE_APP_ERROR; + goto _error; + } + + SSchedMsg schedMsg = {0}; + + if (!isProjQuery) { + if (vnodeParametersSafetyCheck(pQuery) == false) { + *code = TSDB_CODE_APP_ERROR; + goto _error; + } + + SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj)); + pSupporter->numOfMeters = 1; + + pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt); + taosAddIntHash(pSupporter->pMeterObj, pMetersObj[0]->sid, (char *)&pMetersObj[0]); + + pSupporter->pSidSet = NULL; + pSupporter->subgroupIdx = -1; + pSupporter->pMeterSidExtInfo = NULL; + + pQInfo->pMeterQuerySupporter = 
pSupporter; + + if (((*code) = vnodeQuerySingleMeterPrepare(pQInfo, pQInfo->pObj, pSupporter)) != TSDB_CODE_SUCCESS) { + goto _error; + } + + if (pQInfo->over == 1) { + return pQInfo; + } + + schedMsg.fp = vnodeSingleMeterQuery; + } else { + schedMsg.fp = vnodeQueryData; + } + + // set in query flag + pQInfo->signature = TSDB_QINFO_QUERY_FLAG; + + schedMsg.msg = NULL; + schedMsg.thandle = (void *)1; + schedMsg.ahandle = pQInfo; + + dTrace("QInfo:%p set query flag and prepare runtime environment completed, wait for schedule", pQInfo); + + taosScheduleTask(queryQhandle, &schedMsg); + return pQInfo; + +_error: + // table query ref will be decrease during error handling + vnodeFreeQInfo(pQInfo, false); + return NULL; +} + +/* + * query on multi-meters + */ +void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyExpr, SSqlFunctionExpr *pSqlExprs, + SQueryMeterMsg *pQueryMsg, int32_t *code) { + SQInfo *pQInfo; + SQuery *pQuery; + + assert(pQueryMsg->metricQuery == 1 && pQueryMsg->numOfCols > 0 && pQueryMsg->pSidExtInfo != 0 && + pQueryMsg->numOfSids >= 1); + + pQInfo = vnodeAllocateQInfoEx(pQueryMsg, pGroupbyExpr, pSqlExprs, *pMetersObj); + if (pQInfo == NULL) { + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _error; + } + + pQuery = &(pQInfo->query); + dTrace("qmsg:%p create QInfo:%p, QInfo created", pQueryMsg, pQInfo); + + pQuery->order.order = pQueryMsg->order; + pQuery->skey = pQueryMsg->skey; + pQuery->ekey = pQueryMsg->ekey; + + pQInfo->fp = pQueryFunc[pQueryMsg->order]; + pQInfo->num = pQueryMsg->num; + + if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) { + dError("QInfo:%p vid:%d sid:%d id:%s, init dataReady sem failed, reason:%s", pQInfo, pMetersObj[0]->vnode, + pMetersObj[0]->sid, pMetersObj[0]->meterId, strerror(errno)); + *code = TSDB_CODE_APP_ERROR; + goto _error; + } + + SSchedMsg schedMsg = {0}; + + SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj)); + pSupporter->numOfMeters = pQueryMsg->numOfSids; + + pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt); + for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) { + taosAddIntHash(pSupporter->pMeterObj, pMetersObj[i]->sid, (char *)&pMetersObj[i]); + } + + pSupporter->pMeterSidExtInfo = (SMeterSidExtInfo **)pQueryMsg->pSidExtInfo; + int32_t sidElemLen = pQueryMsg->tagLength + sizeof(SMeterSidExtInfo); + + int32_t size = POINTER_BYTES * pQueryMsg->numOfSids + sidElemLen * pQueryMsg->numOfSids; + pSupporter->pMeterSidExtInfo = (SMeterSidExtInfo **)malloc(size); + if (pSupporter->pMeterSidExtInfo == NULL) { + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + dError("QInfo:%p failed to allocate memory for meterSid info, size:%d, abort", pQInfo, size); + goto _error; + } + + char *px = ((char *)pSupporter->pMeterSidExtInfo) + POINTER_BYTES * pQueryMsg->numOfSids; + + for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { + pSupporter->pMeterSidExtInfo[i] = (SMeterSidExtInfo *)px; + pSupporter->pMeterSidExtInfo[i]->sid = ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->sid; + + if (pQueryMsg->tagLength > 0) { + memcpy(pSupporter->pMeterSidExtInfo[i]->tags, ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->tags, + pQueryMsg->tagLength); + } + px += sidElemLen; + } + + if (pGroupbyExpr != NULL && pGroupbyExpr->numOfGroupbyCols > 0) { + pSupporter->pSidSet = + tSidSetCreate(pSupporter->pMeterSidExtInfo, pQueryMsg->numOfSids, (SSchema *)pQueryMsg->pTagSchema, + pQueryMsg->numOfTagsCols, pGroupbyExpr->tagIndex, pGroupbyExpr->numOfGroupbyCols); 
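+    // here the group-by tag columns are passed so the sid set can be split into per-group subsets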
+ } else { + pSupporter->pSidSet = tSidSetCreate(pSupporter->pMeterSidExtInfo, pQueryMsg->numOfSids, + (SSchema *)pQueryMsg->pTagSchema, pQueryMsg->numOfTagsCols, NULL, 0); + } + + pQInfo->pMeterQuerySupporter = pSupporter; + + if (((*code) = vnodeMultiMeterQueryPrepare(pQInfo, pQuery)) != TSDB_CODE_SUCCESS) { + goto _error; + } + + if (pQInfo->over == 1) { + return pQInfo; + } + + pQInfo->signature = TSDB_QINFO_QUERY_FLAG; + + schedMsg.msg = NULL; + schedMsg.thandle = (void *)1; + schedMsg.ahandle = pQInfo; + schedMsg.fp = vnodeMultiMeterQuery; + + dTrace("QInfo:%p set query flag and prepare runtime environment completed, wait for schedule", pQInfo); + + taosScheduleTask(queryQhandle, &schedMsg); + return pQInfo; + +_error: + // table query ref will be decrease during error handling + vnodeFreeQInfo(pQInfo, false); + return NULL; +} + +/* engine provides the storage, the app has to save the data before next + retrieve, *pNum is the number of points retrieved, and argv[] is + the point to retrieved column +*/ + +int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t *timePrec) { + SQInfo *pQInfo; + SQuery *pQuery; + + *numOfRows = 0; + *rowSize = 0; + + pQInfo = (SQInfo *)handle; + if (pQInfo == NULL || pQInfo->killed) return -1; + pQuery = &(pQInfo->query); + + if (!vnodeIsQInfoValid(pQInfo) || (pQuery->sdata == NULL)) { + dError("%p retrieve memory is corrupted!!! QInfo:%p, sign:%p, sdata:%p", pQuery, pQInfo, pQInfo->signature, + pQuery->sdata); + return TSDB_CODE_APP_ERROR; + } + + if (pQInfo->killed) { + dTrace("%p it is already killed", pQuery); + return TSDB_CODE_APP_ERROR; + } + + sem_wait(&pQInfo->dataReady); + *numOfRows = pQInfo->pointsRead - pQInfo->pointsReturned; + *rowSize = pQuery->rowSize; + + *timePrec = vnodeList[pQInfo->pObj->vnode].cfg.precision; + + if (pQInfo->code < 0) return -pQInfo->code; + + return 0; +} + +// vnodeRetrieveQueryInfo must be called first +int vnodeSaveQueryResult(void *handle, char *data) { + SQInfo *pQInfo = (SQInfo *)handle; + + // the remained number of retrieved rows, not the interpolated result + int numOfRows = pQInfo->pointsRead - pQInfo->pointsReturned; + + int32_t numOfFinal = vnodeCopyQueryResultToMsg(pQInfo, data, numOfRows); + pQInfo->pointsReturned += numOfFinal; //(pQInfo->pointsRead + pQInfo->pointsInterpo); + + dTrace("QInfo:%p %d are returned, totalReturned:%d totalRead:%d", pQInfo, numOfFinal, pQInfo->pointsReturned, + pQInfo->pointsRead); + + if (pQInfo->over == 0) { + dTrace("QInfo:%p set query flag, oldSig:%p, func:%s", pQInfo, pQInfo->signature, __FUNCTION__); + uint64_t oldSignature = TSDB_QINFO_SET_QUERY_FLAG(pQInfo); + + /* + * If SQInfo has been released, the value of signature cannot be equalled to + * the address of pQInfo, since in release function, the original value has + * been + * destroyed. However, this memory area may be reused by another function. + * It may be 0 or any value, but it is rarely still be equalled to the address + * of SQInfo. 
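+   * In other words: TSDB_QINFO_SET_QUERY_FLAG() yields the previous signature, and only
+   * when that value still equals the address of this SQInfo is the query re-scheduled below.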
+ */ + if (oldSignature == 0 || oldSignature != (uint64_t)pQInfo) { + dTrace("%p freed or killed, old sig:%p abort query", pQInfo, oldSignature); + } else { + dTrace("%p add query into task queue for schedule", pQInfo); + + SSchedMsg schedMsg; + + if (pQInfo->pMeterQuerySupporter != NULL) { + if (pQInfo->pMeterQuerySupporter->pSidSet == NULL) { + schedMsg.fp = vnodeSingleMeterQuery; + } else { // group by tag + schedMsg.fp = vnodeMultiMeterQuery; + } + } else { + pQInfo->bufIndex = pQInfo->bufIndex ^ 1; // exchange between 0 and 1 + schedMsg.fp = vnodeQueryData; + } + + schedMsg.msg = NULL; + schedMsg.thandle = (void *)1; + schedMsg.ahandle = pQInfo; + taosScheduleTask(queryQhandle, &schedMsg); + } + } + + return numOfFinal; +} + +static int32_t validateQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { + if (pQueryMsg->nAggTimeInterval < 0) { + dError("qmsg:%p illegal value of aggTimeInterval %ld", pQueryMsg, pQueryMsg->nAggTimeInterval); + return -1; + } + + if (pQueryMsg->numOfTagsCols < 0 || pQueryMsg->numOfTagsCols > TSDB_MAX_TAGS) { + dError("qmsg:%p illegal value of numOfTagsCols %ld", pQueryMsg, pQueryMsg->numOfTagsCols); + return -1; + } + + if (pQueryMsg->numOfCols <= 0 || pQueryMsg->numOfCols > TSDB_MAX_COLUMNS) { + dError("qmsg:%p illegal value of numOfCols %ld", pQueryMsg, pQueryMsg->numOfCols); + return -1; + } + + if (pQueryMsg->numOfSids <= 0) { + dError("qmsg:%p illegal value of numOfSids %ld", pQueryMsg, pQueryMsg->numOfSids); + return -1; + } + + if (pQueryMsg->numOfGroupbyCols < 0) { + dError("qmsg:%p illegal value of numOfGroupbyCols %ld", pQueryMsg, pQueryMsg->numOfGroupbyCols); + return -1; + } + + if (pQueryMsg->numOfOutputCols > TSDB_MAX_COLUMNS || pQueryMsg->numOfOutputCols <= 0) { + dError("qmsg:%p illegal value of output columns %d", pQueryMsg, pQueryMsg->numOfOutputCols); + return -1; + } + + if (pQueryMsg->tagLength < 0) { + dError("qmsg:%p illegal value of tag length %d", pQueryMsg, pQueryMsg->tagLength); + return -1; + } + + return 0; +} + +int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { + pQueryMsg->vnode = htons(pQueryMsg->vnode); + pQueryMsg->numOfSids = htonl(pQueryMsg->numOfSids); + +#ifdef TSKEY32 + pQueryMsg->skey = htonl(pQueryMsg->skey); + pQueryMsg->ekey = htonl(pQueryMsg->ekey); +#else + pQueryMsg->skey = htobe64(pQueryMsg->skey); + pQueryMsg->ekey = htobe64(pQueryMsg->ekey); +#endif + + pQueryMsg->num = htonl(pQueryMsg->num); + + pQueryMsg->order = htons(pQueryMsg->order); + pQueryMsg->orderColId = htons(pQueryMsg->orderColId); + + pQueryMsg->metricQuery = htons(pQueryMsg->metricQuery); + + pQueryMsg->nAggTimeInterval = htobe64(pQueryMsg->nAggTimeInterval); + pQueryMsg->numOfTagsCols = htons(pQueryMsg->numOfTagsCols); + pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols); + pQueryMsg->numOfOutputCols = htons(pQueryMsg->numOfOutputCols); + pQueryMsg->numOfGroupbyCols = htons(pQueryMsg->numOfGroupbyCols); + pQueryMsg->tagLength = htons(pQueryMsg->tagLength); + + pQueryMsg->limit = htobe64(pQueryMsg->limit); + pQueryMsg->offset = htobe64(pQueryMsg->offset); + + // query msg safety check + if (validateQueryMeterMsg(pQueryMsg) != 0) { + return TSDB_CODE_INVALID_QUERY_MSG; + } + + SMeterSidExtInfo **pSids = NULL; + char * strBuf = (char *)&pQueryMsg->colList[pQueryMsg->numOfCols]; + for (int32_t col = 0; col < pQueryMsg->numOfCols; ++col) { + pQueryMsg->colList[col].colId = htons(pQueryMsg->colList[col].colId); + + pQueryMsg->colList[col].type = htons(pQueryMsg->colList[col].type); + pQueryMsg->colList[col].bytes = 
htons(pQueryMsg->colList[col].bytes); + + assert(pQueryMsg->colList[col].type >= TSDB_DATA_TYPE_BOOL && pQueryMsg->colList[col].type <= TSDB_DATA_TYPE_NCHAR); + + pQueryMsg->colList[col].filterOn = htons(pQueryMsg->colList[col].filterOn); + pQueryMsg->colList[col].filterOnBinary = htons(pQueryMsg->colList[col].filterOnBinary); + + if (pQueryMsg->colList[col].filterOn && pQueryMsg->colList[col].filterOnBinary) { + pQueryMsg->colList[col].len = htobe64(pQueryMsg->colList[col].len); + pQueryMsg->colList[col].pz = calloc(1, pQueryMsg->colList[col].len + 1); + strcpy(pQueryMsg->colList[col].pz, strBuf); + strBuf += pQueryMsg->colList[col].len + 1; + } else { + pQueryMsg->colList[col].lowerBndi = htobe64(pQueryMsg->colList[col].lowerBndi); + pQueryMsg->colList[col].upperBndi = htobe64(pQueryMsg->colList[col].upperBndi); + } + + pQueryMsg->colList[col].lowerRelOptr = htons(pQueryMsg->colList[col].lowerRelOptr); + pQueryMsg->colList[col].upperRelOptr = htons(pQueryMsg->colList[col].upperRelOptr); + } + + bool hasArithmeticFunction = false; + + /* + * 1. simple projection query on meters, we only record the pSqlFuncExprs[i].colIdx value + * 2. for complex queries, whole SqlExprs object is required. + */ + pQueryMsg->pSqlFuncExprs = malloc(POINTER_BYTES * pQueryMsg->numOfOutputCols); + + char * pMsg = strBuf; + SSqlFuncExprMsg *pExprMsg = (SSqlFuncExprMsg *)pMsg; + + for (int32_t i = 0; i < pQueryMsg->numOfOutputCols; ++i) { + ((SSqlFuncExprMsg **)pQueryMsg->pSqlFuncExprs)[i] = pExprMsg; + + pExprMsg->colInfo.colIdx = htons(pExprMsg->colInfo.colIdx); + pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); + + pExprMsg->functionId = htons(pExprMsg->functionId); + pExprMsg->numOfParams = htons(pExprMsg->numOfParams); + + pMsg += sizeof(SSqlFuncExprMsg); + + for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { + pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType); + pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes); + + if (pExprMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { + pExprMsg->arg[j].argValue.pz = pMsg; + pMsg += pExprMsg->arg[j].argBytes + 1; // one more for the string terminated char. + } else { + pExprMsg->arg[j].argValue.i64 = htobe64(pExprMsg->arg[j].argValue.i64); + } + } + + if (pExprMsg->functionId == TSDB_FUNC_ARITHM) { + hasArithmeticFunction = true; + } else if (pExprMsg->functionId == TSDB_FUNC_TAG || pExprMsg->functionId == TSDB_FUNC_TAGPRJ) { + // ignore the column index check for arithmetic expression. 
+ if (!pExprMsg->colInfo.isTag) { + return TSDB_CODE_INVALID_QUERY_MSG; + } + } else { + if (!vnodeValidateExprColumnInfo(pQueryMsg, pExprMsg)) { + return TSDB_CODE_INVALID_QUERY_MSG; + } + } + + pExprMsg = (SSqlFuncExprMsg *)pMsg; + } + + pQueryMsg->colNameLen = htonl(pQueryMsg->colNameLen); + if (hasArithmeticFunction) { // column name array + assert(pQueryMsg->colNameLen > 0); + pQueryMsg->colNameList = pMsg; + pMsg += pQueryMsg->colNameLen; + } + + pSids = (SMeterSidExtInfo **)calloc(pQueryMsg->numOfSids, sizeof(SMeterSidExtInfo *)); + pQueryMsg->pSidExtInfo = (uint64_t)pSids; + + pSids[0] = (SMeterSidExtInfo *)pMsg; + pSids[0]->sid = htonl(pSids[0]->sid); + + for (int32_t j = 1; j < pQueryMsg->numOfSids; ++j) { + pSids[j] = (SMeterSidExtInfo *)((char *)pSids[j - 1] + sizeof(SMeterSidExtInfo) + pQueryMsg->tagLength); + pSids[j]->sid = htonl(pSids[j]->sid); + } + + pMsg = (char *)pSids[pQueryMsg->numOfSids - 1]; + pMsg += sizeof(SMeterSidExtInfo) + pQueryMsg->tagLength; + + if (pQueryMsg->numOfGroupbyCols > 0 || pQueryMsg->numOfTagsCols > 0) { // group by tag columns + pQueryMsg->pTagSchema = (uint64_t)pMsg; + SSchema *pTagSchema = (SSchema *)pQueryMsg->pTagSchema; + pMsg += sizeof(SSchema) * pQueryMsg->numOfTagsCols; + + if (pQueryMsg->numOfGroupbyCols > 0) { + pQueryMsg->groupbyTagIds = (uint64_t) & (pTagSchema[pQueryMsg->numOfTagsCols]); + } else { + pQueryMsg->groupbyTagIds = 0; + } + pQueryMsg->orderByIdx = htons(pQueryMsg->orderByIdx); + pQueryMsg->orderType = htons(pQueryMsg->orderType); + + pMsg += sizeof(int16_t) * pQueryMsg->numOfGroupbyCols; + } else { + pQueryMsg->pTagSchema = 0; + pQueryMsg->groupbyTagIds = 0; + } + + pQueryMsg->interpoType = htons(pQueryMsg->interpoType); + if (pQueryMsg->interpoType != TSDB_INTERPO_NONE) { + pQueryMsg->defaultVal = (uint64_t)(pMsg); + + int64_t *v = (int64_t *)pMsg; + for (int32_t i = 0; i < pQueryMsg->numOfOutputCols; ++i) { + v[i] = htobe64(v[i]); + } + } + + dTrace("qmsg:%p query on %d meter(s), qrange:%lld-%lld, numOfGroupbyTagCols:%d, numOfTagCols:%d, timestamp order:%d, " + "tags order:%d, tags order col:%d, numOfOutputCols:%d, numOfCols:%d, interval:%lld, fillType:%d", + pQueryMsg, pQueryMsg->numOfSids, pQueryMsg->skey, pQueryMsg->ekey, pQueryMsg->numOfGroupbyCols, + pQueryMsg->numOfTagsCols, pQueryMsg->order, pQueryMsg->orderType, pQueryMsg->orderByIdx, + pQueryMsg->numOfOutputCols, pQueryMsg->numOfCols, pQueryMsg->nAggTimeInterval, pQueryMsg->interpoType); + + return 0; +} diff --git a/src/system/src/vnodeShell.c b/src/system/src/vnodeShell.c new file mode 100644 index 000000000000..5384b3cad765 --- /dev/null +++ b/src/system/src/vnodeShell.c @@ -0,0 +1,522 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE + +#include "vnodeShell.h" +#include +#include +#include +#include +#include "taosmsg.h" +#include "tschemautil.h" + +#include "textbuffer.h" +#include "trpc.h" +#include "vnode.h" +#include "vnodeRead.h" +#include "vnodeUtil.h" + +void * pShellServer = NULL; +SShellObj **shellList = NULL; + +int vnodeProcessRetrieveRequest(char *pMsg, int msgLen, SShellObj *pObj); +int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj); +int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj); + +int vnodeSelectReqNum = 0; +int vnodeInsertReqNum = 0; + +void *vnodeProcessMsgFromShell(char *msg, void *ahandle, void *thandle) { + int sid, vnode; + SShellObj *pObj = (SShellObj *)ahandle; + SIntMsg * pMsg = (SIntMsg *)msg; + uint32_t peerId, peerIp; + short peerPort; + char ipstr[20]; + + if (msg == NULL) { + if (pObj) { + pObj->thandle = NULL; + dTrace("QInfo:%p %s free qhandle", pObj->qhandle, __FUNCTION__); + vnodeFreeQInfo(pObj->qhandle, true); + pObj->qhandle = NULL; + vnodeList[pObj->vnode].shellConns--; + dTrace("vid:%d, shell connection:%d is gone, shellConns:%d", pObj->vnode, pObj->sid, + vnodeList[pObj->vnode].shellConns); + } + return NULL; + } + + taosGetRpcConnInfo(thandle, &peerId, &peerIp, &peerPort, &vnode, &sid); + + if (pObj == NULL) { + if (shellList[vnode]) { + pObj = shellList[vnode] + sid; + pObj->thandle = thandle; + pObj->sid = sid; + pObj->vnode = vnode; + pObj->ip = peerIp; + tinet_ntoa(ipstr, peerIp); + vnodeList[pObj->vnode].shellConns++; + dTrace("vid:%d, shell connection:%d from ip:%s is created, shellConns:%d", vnode, sid, ipstr, + vnodeList[pObj->vnode].shellConns); + } else { + dError("vid:%d, vnode not there, shell connection shall be closed", vnode); + return NULL; + } + } else { + if (pObj != shellList[vnode] + sid) { + dError("vid:%d, shell connection:%d, pObj:%p is not matched with:%p", vnode, sid, pObj, shellList[vnode] + sid); + return NULL; + } + } + + dTrace("vid:%d sid:%d, msg:%s is received pConn:%p", vnode, sid, taosMsg[pMsg->msgType], thandle); + + if (pMsg->msgType == TSDB_MSG_TYPE_QUERY) { + vnodeProcessQueryRequest((char *)pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); + } else if (pMsg->msgType == TSDB_MSG_TYPE_RETRIEVE) { + vnodeProcessRetrieveRequest((char *)pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); + } else if (pMsg->msgType == TSDB_MSG_TYPE_SUBMIT) { + vnodeProcessShellSubmitRequest((char *)pMsg->content, pMsg->msgLen - sizeof(SIntMsg), pObj); + } else { + dError("%s is not processed", taosMsg[pMsg->msgType]); + } + + return pObj; +} + +int vnodeInitShell() { + int size; + SRpcInit rpcInit; + + size = TSDB_MAX_VNODES * sizeof(SShellObj *); + shellList = (SShellObj **)malloc(size); + if (shellList == NULL) return -1; + memset(shellList, 0, size); + + int numOfThreads = tsNumOfCores * tsNumOfThreadsPerCore; + numOfThreads = (1.0 - tsRatioOfQueryThreads) * numOfThreads / 2.0; + if (numOfThreads < 1) numOfThreads = 1; + + memset(&rpcInit, 0, sizeof(rpcInit)); + rpcInit.localIp = tsInternalIp; + rpcInit.localPort = tsVnodeShellPort; + rpcInit.label = "DND-shell"; + rpcInit.numOfThreads = numOfThreads; + rpcInit.fp = vnodeProcessMsgFromShell; + rpcInit.bits = TSDB_SHELL_VNODE_BITS; + rpcInit.numOfChanns = TSDB_MAX_VNODES; + rpcInit.sessionsPerChann = 16; + rpcInit.idMgmt = TAOS_ID_FREE; + rpcInit.connType = TAOS_CONN_UDPS; + rpcInit.idleTime = tsShellActivityTimer * 1200; + rpcInit.qhandle = rpcQhandle; + rpcInit.efp = vnodeSendVpeerCfgMsg; + + pShellServer = 
taosOpenRpc(&rpcInit); + if (pShellServer == NULL) { + dError("failed to init connection to shell"); + return -1; + } + + return 0; +} + +int vnodeOpenShellVnode(int vnode) { + SVnodeCfg *pCfg = &vnodeList[vnode].cfg; + int sessions = pCfg->maxSessions * 1.1; + if (sessions < 300) sessions = 300; + + int size = sessions * sizeof(SShellObj); + + shellList[vnode] = (SShellObj *)malloc(size); + if (shellList[vnode] == NULL) { + dError("vid:%d failed to allocate shellObj", vnode); + return -1; + } + + memset(shellList[vnode], 0, size); + + taosOpenRpcChann(pShellServer, vnode, sessions); + + return 0; +} + +void vnodeCloseShellVnode(int vnode) { + taosCloseRpcChann(pShellServer, vnode); + + if (shellList[vnode] == NULL) return; + + for (int i = 0; i < vnodeList[vnode].cfg.maxSessions; ++i) { + vnodeFreeQInfo(shellList[vnode][i].qhandle, true); + } + + tfree(shellList[vnode]); +} + +void vnodeCleanUpShell() { + if (pShellServer) taosCloseRpc(pShellServer); + + tfree(shellList); +} + +int vnodeSendQueryRspMsg(SShellObj *pObj, int code, void *qhandle) { + char *pMsg, *pStart; + int msgLen; + + pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_QUERY_RSP, 128); + if (pStart == NULL) return -1; + pMsg = pStart; + + *pMsg = code; + pMsg++; + + *((uint64_t *)pMsg) = (uint64_t)qhandle; + pMsg += 8; + + msgLen = pMsg - pStart; + taosSendMsgToPeer(pObj->thandle, pStart, msgLen); + + return msgLen; +} + +int vnodeSendShellSubmitRspMsg(SShellObj *pObj, int code, int numOfPoints) { + char *pMsg, *pStart; + int msgLen; + + pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_SUBMIT_RSP, 128); + if (pStart == NULL) return -1; + pMsg = pStart; + + *pMsg = code; + pMsg++; + + *(int32_t *)pMsg = numOfPoints; + pMsg += sizeof(numOfPoints); + + msgLen = pMsg - pStart; + taosSendMsgToPeer(pObj->thandle, pStart, msgLen); + + return msgLen; +} + +int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) { + int ret, code = 0; + SMeterObj * pMeterObj = NULL; + SQueryMeterMsg * pQueryMsg; + SMeterSidExtInfo **pSids = NULL; + int32_t incNumber = 0; + SSqlFunctionExpr * pExprs = NULL; + SSqlGroupbyExpr * pGroupbyExpr = NULL; + SMeterObj ** pMeterObjList = NULL; + + pQueryMsg = (SQueryMeterMsg *)pMsg; + if ((code = vnodeConvertQueryMeterMsg(pQueryMsg)) != TSDB_CODE_SUCCESS) { + goto _query_over; + } + + if (pQueryMsg->numOfSids <= 0) { + code = TSDB_CODE_APP_ERROR; + goto _query_over; + } + + if (pQueryMsg->vnode >= TSDB_MAX_VNODES || pQueryMsg->vnode < 0) { + dTrace("qmsg:%p,vid:%d is out of range", pQueryMsg, pQueryMsg->vnode); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _query_over; + } + + SVnodeObj *pVnode = &vnodeList[pQueryMsg->vnode]; + + if (pVnode->cfg.maxSessions == 0) { + dError("qmsg:%p,vid:%d is not activated yet", pQueryMsg, pQueryMsg->vnode); + vnodeSendVpeerCfgMsg(pQueryMsg->vnode); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _query_over; + } + + if (!(pVnode->accessState & TSDB_VN_READ_ACCCESS)) { + code = TSDB_CODE_NO_READ_ACCESS; + goto _query_over; + } + + if (pQueryMsg->pSidExtInfo == 0) { + dTrace("qmsg:%p,SQueryMeterMsg wrong format", pQueryMsg); + code = TSDB_CODE_APP_ERROR; + goto _query_over; + } + + if (pVnode->meterList == NULL) { + dError("qmsg:%p,vid:%d has been closed", pQueryMsg, pQueryMsg->vnode); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _query_over; + } + + pSids = (SMeterSidExtInfo **)pQueryMsg->pSidExtInfo; + for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { + if (pSids[i]->sid >= pVnode->cfg.maxSessions || pSids[i]->sid < 0) { + 
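+      // any sid outside [0, maxSessions) aborts the whole query with TSDB_CODE_INVALID_SESSION_ID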
dTrace("qmsg:%p sid:%d is out of range, valid range:[%d,%d]", pQueryMsg, pSids[i]->sid, 0, + pVnode->cfg.maxSessions); + + code = TSDB_CODE_INVALID_SESSION_ID; + goto _query_over; + } + } + + // todo optimize for single table query process + pMeterObjList = (SMeterObj **)calloc(pQueryMsg->numOfSids, sizeof(SMeterObj *)); + if (pMeterObjList == NULL) { + code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _query_over; + } + + //add query ref for all meters. if any meter failed to add ref, rollback whole operation and go to error + pthread_mutex_lock(&pVnode->vmutex); + code = vnodeIncQueryRefCount(pQueryMsg, pSids, pMeterObjList, &incNumber); + assert(incNumber <= pQueryMsg->numOfSids); + pthread_mutex_unlock(&pVnode->vmutex); + + if (code != TSDB_CODE_SUCCESS) { + goto _query_over; + } + + pExprs = vnodeCreateSqlFunctionExpr(pQueryMsg, &code); + if (pExprs == NULL) { + assert(code != TSDB_CODE_SUCCESS); + goto _query_over; + } + + pGroupbyExpr = vnodeCreateGroupbyExpr(pQueryMsg, &code); + if ((pGroupbyExpr == NULL && pQueryMsg->numOfGroupbyCols != 0) || code != TSDB_CODE_SUCCESS) { + goto _query_over; + } + + if (pObj->qhandle) { + dTrace("QInfo:%p %s free qhandle", pObj->qhandle, __FUNCTION__); + vnodeFreeQInfo(pObj->qhandle, true); + pObj->qhandle = NULL; + } + + if (pQueryMsg->metricQuery) { + pObj->qhandle = vnodeQueryOnMultiMeters(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); + } else { + assert(pGroupbyExpr == NULL); + pObj->qhandle = vnodeQueryInTimeRange(pMeterObjList, pGroupbyExpr, pExprs, pQueryMsg, &code); + } + +_query_over: + if (code != TSDB_CODE_SUCCESS) { + // if failed to add ref for all meters in this query, abort current query + vnodeDecQueryRefCount(pQueryMsg, pMeterObjList, incNumber); + } + + tfree(pMeterObjList); + ret = vnodeSendQueryRspMsg(pObj, code, pObj->qhandle); + + free(pSids); + + __sync_fetch_and_add(&vnodeSelectReqNum, 1); + return ret; +} + +void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { + char * pMsg = pSched->msg; + int msgLen; + SShellObj *pObj = (SShellObj *)pSched->ahandle; + + SRetrieveMeterMsg *pRetrieve; + SRetrieveMeterRsp *pRsp; + int numOfRows = 0, rowSize = 0, size = 0; + int16_t timePrec = TSDB_TIME_PRECISION_MILLI; + + char *pStart; + + int code = 0; + pRetrieve = (SRetrieveMeterMsg *)pMsg; + + /* + * in case of server restart, apps may hold qhandle created by server before restart, + * which is actually invalid, therefore, signature check is required. 
+ */ + if (pRetrieve->qhandle == (uint64_t)pObj->qhandle) { + // if free flag is set, client wants to clean the resources + if (pRetrieve->free == 0) + code = vnodeRetrieveQueryInfo((void *)(pRetrieve->qhandle), &numOfRows, &rowSize, &timePrec); + } else { + dError("QInfo:%p, qhandle:%p is not matched with saved:%p", pObj->qhandle, pRetrieve->qhandle, pObj->qhandle); + code = TSDB_CODE_INVALID_QHANDLE; + } + + if (code == TSDB_CODE_SUCCESS) { + size = vnodeGetResultSize((void *)(pRetrieve->qhandle), &numOfRows); + } + + pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, size + 100); + if (pStart == NULL) goto _exit; + pMsg = pStart; + + *pMsg = code; + pMsg++; + + pRsp = (SRetrieveMeterRsp *)pMsg; + pRsp->numOfRows = htonl(numOfRows); + pRsp->precision = htons(timePrec); + + if (code == TSDB_CODE_SUCCESS) { + pRsp->offset = htobe64(vnodeGetOffsetVal(pRetrieve->qhandle)); + pRsp->useconds = ((SQInfo *)(pRetrieve->qhandle))->useconds; + } else { + pRsp->offset = 0; + pRsp->useconds = 0; + } + + pMsg = pRsp->data; + + if (numOfRows > 0 && code == TSDB_CODE_SUCCESS) { + vnodeSaveQueryResult((void *)(pRetrieve->qhandle), pRsp->data); + } + + pMsg += size; + msgLen = pMsg - pStart; + + if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS)) { + dTrace("QInfo:%p %s free qhandle code:%d", pObj->qhandle, __FUNCTION__, code); + vnodeFreeQInfoInQueue(pObj->qhandle); + pObj->qhandle = NULL; + } + + taosSendMsgToPeer(pObj->thandle, pStart, msgLen); + +_exit: + free(pSched->msg); + + return; +} + +int vnodeProcessRetrieveRequest(char *pMsg, int msgLen, SShellObj *pObj) { + SSchedMsg schedMsg; + + char *msg = malloc(msgLen); + memcpy(msg, pMsg, msgLen); + schedMsg.msg = msg; + schedMsg.ahandle = pObj; + schedMsg.fp = vnodeExecuteRetrieveReq; + taosScheduleTask(queryQhandle, &schedMsg); + + return msgLen; +} + +int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) { + int code = 0, ret = 0; + SShellSubmitMsg shellSubmit = *(SShellSubmitMsg *)pMsg; + SShellSubmitMsg *pSubmit = &shellSubmit; + + pSubmit->vnode = htons(pSubmit->vnode); + pSubmit->numOfSid = htonl(pSubmit->numOfSid); + + if (pSubmit->numOfSid <= 0) { + dError("invalid num of meters:%d", pSubmit->numOfSid); + code = TSDB_CODE_APP_ERROR; + goto _submit_over; + } + + if (pSubmit->vnode >= TSDB_MAX_VNODES || pSubmit->vnode < 0) { + dTrace("vnode:%d is out of range", pSubmit->vnode); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _submit_over; + } + + SVnodeObj *pVnode = vnodeList + pSubmit->vnode; + if (pVnode->cfg.maxSessions == 0 || pVnode->meterList == NULL) { + dError("vid:%d is not activated for submit", pSubmit->vnode); + vnodeSendVpeerCfgMsg(pSubmit->vnode); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _submit_over; + } + + if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) { + code = TSDB_CODE_NO_WRITE_ACCESS; + goto _submit_over; + } + + pObj->count = pSubmit->numOfSid; // for import + pObj->code = 0; // for import + pObj->numOfTotalPoints = 0; // for import + SShellSubmitBlock *pBlocks = (SShellSubmitBlock *)(pMsg + sizeof(SShellSubmitMsg)); + + int32_t numOfPoints = 0; + int32_t numOfTotalPoints = 0; + + for (int32_t i = 0; i < pSubmit->numOfSid; ++i) { + numOfPoints = 0; + + pBlocks->sid = htonl(pBlocks->sid); + pBlocks->uid = htobe64(pBlocks->uid); + + if (pBlocks->sid >= pVnode->cfg.maxSessions || pBlocks->sid <= 0) { + dTrace("sid:%d is out of range", pBlocks->sid); + code = TSDB_CODE_INVALID_SESSION_ID; + goto _submit_over; + } 
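+    // for each submit block: look up the meter object for this sid, then route the rows to the
+    // import or insert path depending on pSubmit->import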
+ + int vnode = pSubmit->vnode; + int sid = pBlocks->sid; + + SMeterObj *pMeterObj = vnodeList[vnode].meterList[sid]; + if (pMeterObj == NULL) { + dError("vid:%d sid:%d, no active session", vnode, sid); + vnodeSendMeterCfgMsg(vnode, sid); + code = TSDB_CODE_NOT_ACTIVE_SESSION; + goto _submit_over; + } + + // dont include sid, vid + int subMsgLen = sizeof(pBlocks->numOfRows) + htons(pBlocks->numOfRows) * pMeterObj->bytesPerPoint; + int sversion = htonl(pBlocks->sversion); + + if (pMeterObj->state == TSDB_METER_STATE_READY) { + if (pSubmit->import) + code = vnodeImportPoints(pMeterObj, (char *)&(pBlocks->numOfRows), subMsgLen, TSDB_DATA_SOURCE_SHELL, pObj, + sversion, &numOfPoints); + else + code = vnodeInsertPoints(pMeterObj, (char *)&(pBlocks->numOfRows), subMsgLen, TSDB_DATA_SOURCE_SHELL, NULL, + sversion, &numOfPoints); + if (code != 0) break; + } else if (pMeterObj->state >= TSDB_METER_STATE_DELETING) { + dTrace("vid:%d sid:%d id:%s, is is removed, state:", pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, + pMeterObj->state); + code = TSDB_CODE_NOT_ACTIVE_SESSION; + break; + } else { // importing state or others + dTrace("vid:%d sid:%d id:%s, try again since in state:%d", pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, + pMeterObj->state); + code = TSDB_CODE_ACTION_IN_PROGRESS; + break; + } + + numOfTotalPoints += numOfPoints; + pBlocks = (SShellSubmitBlock *)((char *)pBlocks + sizeof(SShellSubmitBlock) + + htons(pBlocks->numOfRows) * pMeterObj->bytesPerPoint); + } + +_submit_over: + // for import, send the submit response only when return code is not zero + if (pSubmit->import == 0 || code != 0) ret = vnodeSendShellSubmitRspMsg(pObj, code, numOfTotalPoints); + + __sync_fetch_and_add(&vnodeInsertReqNum, 1); + return ret; +} diff --git a/src/system/src/vnodeStore.c b/src/system/src/vnodeStore.c new file mode 100644 index 000000000000..e7154e07007f --- /dev/null +++ b/src/system/src/vnodeStore.c @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include "dnodeSystem.h" +#include "trpc.h" +#include "ttime.h" +#include "vnode.h" +#include "vnodeStore.h" +#include "vnodeUtil.h" + +int vnodeCreateMeterObjFile(int vnode); + +int tsMaxVnode = -1; +int tsOpenVnodes = 0; +SVnodeObj *vnodeList = NULL; + +int vnodeInitStoreVnode(int vnode) { + SVnodeObj *pVnode = vnodeList + vnode; + + pVnode->vnode = vnode; + vnodeOpenMetersVnode(vnode); + if (pVnode->cfg.maxSessions == 0) return 0; + + pVnode->firstKey = taosGetTimestamp(pVnode->cfg.precision); + + pVnode->pCachePool = vnodeOpenCachePool(vnode); + if (pVnode->pCachePool == NULL) { + dError("vid:%d, cache pool init failed.", pVnode->vnode); + return -1; + } + + if (vnodeInitFile(vnode) < 0) return -1; + + if (vnodeInitCommit(vnode) < 0) { + dError("vid:%d, commit init failed.", pVnode->vnode); + return -1; + } + + pthread_mutex_init(&(pVnode->vmutex), NULL); + dTrace("vid:%d, storage initialized, version:%ld fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, + pVnode->numOfFiles); + + return 0; +} + +int vnodeOpenVnode(int vnode) { + SVnodeObj *pVnode = vnodeList + vnode; + + pVnode->vnode = vnode; + pVnode->accessState = TSDB_VN_ALL_ACCCESS; + if (pVnode->cfg.maxSessions == 0) return 0; + + pthread_mutex_lock(&dmutex); + vnodeOpenShellVnode(vnode); + + if (vnode > tsMaxVnode) tsMaxVnode = vnode; + vnodeCalcOpenVnodes(); + + pthread_mutex_unlock(&dmutex); + + vnodeOpenStreams(pVnode, NULL); + + dTrace("vid:%d, vnode is opened, openVnodes:%d", vnode, tsOpenVnodes); + + return 0; +} + +void vnodeCloseVnode(int vnode) { + if (vnodeList == NULL) return; + + pthread_mutex_lock(&dmutex); + if (vnodeList[vnode].cfg.maxSessions == 0) { + pthread_mutex_unlock(&dmutex); + return; + } + + vnodeCloseStream(vnodeList + vnode); + vnodeCancelCommit(vnodeList + vnode); + vnodeCloseMetersVnode(vnode); + vnodeCloseShellVnode(vnode); + vnodeCloseCachePool(vnode); + vnodeCleanUpCommit(vnode); + + pthread_mutex_destroy(&(vnodeList[vnode].vmutex)); + + if (tsMaxVnode == vnode) tsMaxVnode = vnode - 1; + + tfree(vnodeList[vnode].meterIndex); + memset(vnodeList + vnode, 0, sizeof(SVnodeObj)); + + vnodeCalcOpenVnodes(); + + pthread_mutex_unlock(&dmutex); +} + +int vnodeCreateVnode(int vnode, SVnodeCfg *pCfg, SVPeerDesc *pDesc) { + char fileName[128]; + + vnodeList[vnode].status = TSDB_STATUS_CREATING; + + sprintf(fileName, "%s/vnode%d", tsDirectory, vnode); + mkdir(fileName, 0755); + + sprintf(fileName, "%s/vnode%d/db", tsDirectory, vnode); + mkdir(fileName, 0755); + + vnodeList[vnode].cfg = *pCfg; + if (vnodeCreateMeterObjFile(vnode) != 0) { + return TSDB_CODE_VG_INIT_FAILED; + } + + if (vnodeSaveVnodeCfg(vnode, pCfg, pDesc) != 0) { + return TSDB_CODE_VG_INIT_FAILED; + } + + if (vnodeInitStoreVnode(vnode) != 0) { + return TSDB_CODE_VG_COMMITLOG_INIT_FAILED; + } + + return vnodeOpenVnode(vnode); +} + +void vnodeRemoveDataFiles(int vnode) { + char vnodeDir[TSDB_FILENAME_LEN]; + char dfilePath[TSDB_FILENAME_LEN]; + char linkFile[TSDB_FILENAME_LEN]; + struct dirent *de = NULL; + DIR * dir = NULL; + + sprintf(vnodeDir, "%s/vnode%d/db", tsDirectory, vnode); + dir = opendir(vnodeDir); + if (dir == NULL) return; + while ((de = readdir(dir)) != NULL) { + if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue; + if ((strcmp(de->d_name + strlen(de->d_name) - strlen(".head"), ".head") == 0 || + strcmp(de->d_name + strlen(de->d_name) - strlen(".data"), ".data") == 0 || + strcmp(de->d_name + strlen(de->d_name) - strlen(".last"), 
".last") == 0) && + (de->d_type & DT_LNK)) { + sprintf(linkFile, "%s/%s", vnodeDir, de->d_name); + + memset(dfilePath, 0, TSDB_FILENAME_LEN); + int tcode = readlink(linkFile, dfilePath, TSDB_FILENAME_LEN); + remove(linkFile); + + if (tcode >= 0) { + remove(dfilePath); + dTrace("Data file %s is removed, link file %s", dfilePath, linkFile); + } + } else { + remove(de->d_name); + } + } + + closedir(dir); + rmdir(vnodeDir); + + sprintf(vnodeDir, "%s/vnode%d/meterObj.v%d", tsDirectory, vnode, vnode); + remove(vnodeDir); + + sprintf(vnodeDir, "%s/vnode%d", tsDirectory, vnode); + rmdir(vnodeDir); + dTrace("vnode %d is removed!", vnode); +} + +void vnodeRemoveVnode(int vnode) { + if (vnodeList == NULL) return; + + if (vnodeList[vnode].cfg.maxSessions > 0) { + vnodeCloseVnode(vnode); + + vnodeRemoveDataFiles(vnode); + + // sprintf(cmd, "rm -rf %s/vnode%d", tsDirectory, vnode); + // if ( system(cmd) < 0 ) { + // dError("vid:%d, failed to run command %s vnode, reason:%s", vnode, cmd, strerror(errno)); + // } else { + // dTrace("vid:%d, this vnode is deleted!!!", vnode); + // } + } else { + dTrace("vid:%d, max sessions:%d, this vnode already dropped!!!", vnode, vnodeList[vnode].cfg.maxSessions); + vnodeList[vnode].cfg.maxSessions = 0; + vnodeCalcOpenVnodes(); + } +} + +int vnodeInitStore() { + int vnode; + int size; + + size = sizeof(SVnodeObj) * TSDB_MAX_VNODES; + vnodeList = (SVnodeObj *)malloc(size); + if (vnodeList == NULL) return -1; + memset(vnodeList, 0, size); + + for (vnode = 0; vnode < TSDB_MAX_VNODES; ++vnode) { + if (vnodeInitStoreVnode(vnode) < 0) { + // one vnode is failed to recover from commit log, continue for remain + return -1; + } + } + + return 0; +} + +int vnodeInitVnodes() { + int vnode; + + for (vnode = 0; vnode < TSDB_MAX_VNODES; ++vnode) { + if (vnodeOpenVnode(vnode) < 0) return -1; + } + + return 0; +} + +void vnodeCleanUpVnodes() { + static int again = 0; + if (vnodeList == NULL) return; + + pthread_mutex_lock(&dmutex); + + if (again) { + pthread_mutex_unlock(&dmutex); + return; + } + again = 1; + + for (int vnode = 0; vnode < TSDB_MAX_VNODES; ++vnode) { + if (vnodeList[vnode].pCachePool) { + vnodeList[vnode].status = TSDB_STATUS_OFFLINE; + } + } + + pthread_mutex_unlock(&dmutex); + + for (int vnode = 0; vnode < TSDB_MAX_VNODES; ++vnode) { + if (vnodeList[vnode].pCachePool) { + vnodeProcessCommitTimer(vnodeList + vnode, NULL); + while (vnodeList[vnode].commitThread != 0) { + taosMsleep(10); + } + vnodeCleanUpCommit(vnode); + } + } +} + +void vnodeCalcOpenVnodes() { + int openVnodes = 0; + for (int vnode = 0; vnode <= tsMaxVnode; ++vnode) { + if (vnodeList[vnode].cfg.maxSessions <= 0) continue; + openVnodes++; + } + + __sync_val_compare_and_swap(&tsOpenVnodes, tsOpenVnodes, openVnodes); +} diff --git a/src/system/src/vnodeStream.c b/src/system/src/vnodeStream.c new file mode 100644 index 000000000000..f2a0eeccbb2c --- /dev/null +++ b/src/system/src/vnodeStream.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "vnode.h" +#include + +/* static TAOS *dbConn = NULL; */ +void vnodeCloseStreamCallback(void *param); + +void vnodeProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { + SMeterObj *pObj = (SMeterObj *)param; + dTrace("vid:%d sid:%d id:%s, stream result is ready", pObj->vnode, pObj->sid, pObj->meterId); + + // construct data + int32_t contLen = pObj->bytesPerPoint; + char * pTemp = calloc(1, sizeof(SSubmitMsg) + pObj->bytesPerPoint + sizeof(SVMsgHeader)); + SSubmitMsg *pMsg = (SSubmitMsg *)(pTemp + sizeof(SVMsgHeader)); + + pMsg->numOfRows = htons(1); + + char ncharBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; + + int32_t offset = 0; + for (int32_t i = 0; i < pObj->numOfColumns; ++i) { + char *dst = row[i]; + if (dst == NULL) { + setNull(pMsg->payLoad + offset, pObj->schema[i].type, pObj->schema[i].bytes); + } else { + // here, we need to transfer nchar(utf8) to unicode(ucs-4) + if (pObj->schema[i].type == TSDB_DATA_TYPE_NCHAR) { + taosMbsToUcs4(row[i], pObj->schema[i].bytes, ncharBuf, TSDB_MAX_BYTES_PER_ROW); + dst = ncharBuf; + } + + memcpy(pMsg->payLoad + offset, dst, pObj->schema[i].bytes); + } + + offset += pObj->schema[i].bytes; + } + + contLen += sizeof(SSubmitMsg); + int32_t numOfPoints = 0; + vnodeInsertPoints(pObj, (char *)pMsg, contLen, TSDB_DATA_SOURCE_SHELL, NULL, pObj->sversion, &numOfPoints); + + assert(numOfPoints >= 0 && numOfPoints <= 1); + tfree(pTemp); +} + +static void vnodeGetDBFromMeterId(SMeterObj *pObj, char *db) { + char *st = strstr(pObj->meterId, "."); + char *end = strstr(st + 1, "."); + + memcpy(db, st + 1, end - (st + 1)); +} + +void vnodeOpenStreams(void *param, void *tmrId) { + SVnodeObj *pVnode = (SVnodeObj *)param; + SMeterObj *pObj; + + if (pVnode->meterList == NULL) return; + + taosTmrStopA(&pVnode->streamTimer); + pVnode->streamTimer = NULL; + + for (int sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { + pObj = pVnode->meterList[sid]; + if (pObj == NULL || pObj->sqlLen == 0 || pObj->status == 1 || pObj->state == TSDB_METER_STATE_DELETED) continue; + + dTrace("vid:%d sid:%d id:%s, open stream:%s", pObj->vnode, sid, pObj->meterId, pObj->pSql); + + if (pVnode->dbConn == NULL) { + char db[64] = {0}; + char user[64] = {0}; + vnodeGetDBFromMeterId(pObj, db); + sprintf(user, "_%s", pVnode->cfg.acct); + pVnode->dbConn = taos_connect(NULL, user, tsInternalPass, db, 0); + } + + if (pVnode->dbConn == NULL) { + dError("vid:%d, failed to connect to mgmt node: %s", pVnode->vnode, tsInternalIp); + taosTmrReset(vnodeOpenStreams, 1000, param, vnodeTmrCtrl, &pVnode->streamTimer); + return; + } + + if (pObj->pStream == NULL) { + pObj->pStream = taos_open_stream(pVnode->dbConn, pObj->pSql, vnodeProcessStreamRes, pObj->lastKey, pObj, + vnodeCloseStreamCallback); + if (pObj->pStream) pVnode->numOfStreams++; + } + } +} + +void vnodeCreateStream(SMeterObj *pObj) { + if (pObj->sqlLen <= 0) return; + + SVnodeObj *pVnode = vnodeList + pObj->vnode; + + if (pObj->pStream) return; + + dTrace("vid:%d sid:%d id:%s stream:%s is created", pObj->vnode, pObj->sid, pObj->meterId, pObj->pSql); + if (pVnode->dbConn == NULL) { + if (pVnode->streamTimer == NULL) taosTmrReset(vnodeOpenStreams, 1000, pVnode, vnodeTmrCtrl, &pVnode->streamTimer); + } else { + pObj->pStream = taos_open_stream(pVnode->dbConn, pObj->pSql, vnodeProcessStreamRes, pObj->lastKey, pObj, + vnodeCloseStreamCallback); + if (pObj->pStream) pVnode->numOfStreams++; + } +} + +// Close only one stream +void vnodeRemoveStream(SMeterObj *pObj) { + SVnodeObj *pVnode = vnodeList + pObj->vnode; + if (pObj->sqlLen <= 0) 
return; + + if (pObj->pStream) { + taos_close_stream(pObj->pStream); + pVnode->numOfStreams--; + } + + pObj->pStream = NULL; + if (pVnode->numOfStreams == 0) { + taos_close(pVnode->dbConn); + pVnode->dbConn = NULL; + } + + dTrace("vid:%d sid:%d id:%d stream is removed", pObj->vnode, pObj->sid, pObj->meterId); +} + +// Close all streams in a vnode +void vnodeCloseStream(SVnodeObj *pVnode) { + SMeterObj *pObj; + dTrace("vid:%d, stream is closed", pVnode->vnode); + + // stop stream computing + for (int sid = 0; sid < pVnode->cfg.maxSessions; ++sid) { + pObj = pVnode->meterList[sid]; + if (pObj == NULL) continue; + if (pObj->sqlLen > 0 && pObj->pStream) { + taos_close_stream(pObj->pStream); + pVnode->numOfStreams--; + } + pObj->pStream = NULL; + } +} + +// Callback function called from client +void vnodeCloseStreamCallback(void *param) { + SMeterObj *pMeter = (SMeterObj *)param; + SVnodeObj *pVnode = NULL; + + if (pMeter == NULL || pMeter->sqlLen == 0) return; + pVnode = vnodeList + pMeter->vnode; + + pMeter->sqlLen = 0; + pMeter->pSql = NULL; + pMeter->pStream = NULL; + + pVnode->numOfStreams--; + + if (pVnode->numOfStreams == 0) { + taos_close(pVnode->dbConn); + pVnode->dbConn = NULL; + } + + vnodeSaveMeterObjToFile(pMeter); +} \ No newline at end of file diff --git a/src/system/src/vnodeSystem.c b/src/system/src/vnodeSystem.c new file mode 100644 index 000000000000..8d3b66e52008 --- /dev/null +++ b/src/system/src/vnodeSystem.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
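vnodeGetDBFromMeterId in vnodeStream.c above copies whatever sits between the first two '.' characters of the meter id, which the code treats as an "acct.db.table"-style name; it does not check for missing dots or terminate the output buffer. A bounds-checked sketch of the same extraction (dbFromMeterId is an illustrative name, and the id format is an assumption read off the code):

#include <string.h>

static int dbFromMeterId(const char *meterId, char *db, size_t dbLen) {
  const char *st  = strchr(meterId, '.');
  const char *end = (st != NULL) ? strchr(st + 1, '.') : NULL;
  if (end == NULL) return -1;                       /* not an acct.db.table id */

  size_t len = (size_t)(end - st - 1);
  if (len >= dbLen) return -1;                      /* output buffer too small */

  memcpy(db, st + 1, len);
  db[len] = '\0';
  return 0;
}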
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsdb.h" +#include "tsocket.h" +#include "vnode.h" + +// internal global, not configurable +void * vnodeTmrCtrl; +void * rpcQhandle; +void * dmQhandle; +void * queryQhandle; +uint32_t tsRebootTime; + +int vnodeInitSystem() { + int numOfThreads; + + numOfThreads = tsRatioOfQueryThreads * tsNumOfCores * tsNumOfThreadsPerCore; + if (numOfThreads < 1) numOfThreads = 1; + queryQhandle = taosInitScheduler(tsNumOfVnodesPerCore * tsNumOfCores * tsSessionsPerVnode, numOfThreads, "query"); + + // numOfThreads = (1.0 - tsRatioOfQueryThreads) * tsNumOfCores * tsNumOfThreadsPerCore / 2.0; + // if (numOfThreads < 1) numOfThreads = 1; + rpcQhandle = taosInitScheduler(tsNumOfVnodesPerCore * tsNumOfCores * tsSessionsPerVnode, 1, "dnode"); + + vnodeTmrCtrl = taosTmrInit(tsSessionsPerVnode + 1000, 200, 60000, "DND-vnode"); + if (vnodeTmrCtrl == NULL) { + dError("failed to init timer, exit"); + return -1; + } + + if (vnodeInitStore() < 0) { + dError("failed to init vnode storage"); + return -1; + } + + if (vnodeInitShell() < 0) { + dError("failed to init communication to shell"); + return -1; + } + + if (vnodeInitVnodes() < 0) { + dError("failed to init store"); + return -1; + } + + dPrint("vnode is initialized successfully"); + + return 0; +} + +void vnodeInitQHandle() { + // int numOfThreads = (1.0 - tsRatioOfQueryThreads) * tsNumOfCores * tsNumOfThreadsPerCore / 2.0; + // if (numOfThreads < 1) numOfThreads = 1; + rpcQhandle = taosInitScheduler(tsNumOfVnodesPerCore * tsNumOfCores * tsSessionsPerVnode, 1, "dnode"); + + dmQhandle = taosInitScheduler(tsSessionsPerVnode, 1, "mgmt"); +} diff --git a/src/system/src/vnodeUtil.c b/src/system/src/vnodeUtil.c new file mode 100644 index 000000000000..02af8d0c0260 --- /dev/null +++ b/src/system/src/vnodeUtil.c @@ -0,0 +1,563 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
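vnodeInitSystem above sizes the query thread pool as tsRatioOfQueryThreads * tsNumOfCores * tsNumOfThreadsPerCore, clamped to at least one thread, and passes the result to taosInitScheduler. A trivial sketch of that sizing rule; the function name and parameters are illustrative, only the formula is taken from the patch:

static int queryThreadCount(double ratioOfQueryThreads, int numOfCores, int threadsPerCore) {
  int n = (int)(ratioOfQueryThreads * numOfCores * threadsPerCore);
  return (n < 1) ? 1 : n;  /* never drop below one query thread */
}

For example, a ratio of 0.5 on an 8-core machine with one thread per core yields 4 query threads, while any configuration that rounds down to zero still gets a single thread.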
+ */ + +#include +#include +#include +#include + +#include "tast.h" +#include "tschemautil.h" +#include "vnode.h" +#include "vnodeDataFilterFunc.h" +#include "vnodeUtil.h" + +int vnodeCheckFileIntegrity(FILE* fp) { + /* + int savedSessions, savedMeterSize; + + fseek(fp, TSDB_FILE_HEADER_LEN/3, SEEK_SET); + fscanf(fp, "%d %d", &savedSessions, &savedMeterSize); + if ( (savedSessions != tsSessionsPerVnode) || (savedMeterSize != tsMeterSizeOnFile) ) { + dError("file structure is changed"); + return -1; + + } + + uint64_t checkSum = 0, savedCheckSum=0; + checkSum = taosGetCheckSum(fp, TSDB_FILE_HEADER_LEN); + + fseek(fp, TSDB_FILE_HEADER_LEN - cksumsize, SEEK_SET); + fread(&savedCheckSum, cksumsize, 1, fp); + + if ( savedCheckSum != checkSum ) { + dError("check sum is not matched:0x%x 0x%x", checkSum, savedCheckSum); + return -1; + } + */ + return 0; +} + +void vnodeCreateFileHeaderFd(int fd) { + char temp[TSDB_FILE_HEADER_LEN / 4]; + int lineLen; + + lineLen = sizeof(temp); + + // write the first line` + memset(temp, 0, lineLen); + *(int16_t*)temp = vnodeFileVersion; + sprintf(temp + sizeof(int16_t), "tsdb version: %s\n", version); + /* *((int16_t *)(temp + TSDB_FILE_HEADER_LEN/8)) = vnodeFileVersion; */ + lseek(fd, 0, SEEK_SET); + write(fd, temp, lineLen); + + // second line + memset(temp, 0, lineLen); + write(fd, temp, lineLen); + + // the third/forth line is the dynamic info + memset(temp, 0, lineLen); + write(fd, temp, lineLen); + write(fd, temp, lineLen); +} + +void vnodeGetHeadFileHeaderInfo(int fd, SVnodeHeadInfo* pHeadInfo) { + lseek(fd, TSDB_FILE_HEADER_LEN / 4, SEEK_SET); + read(fd, pHeadInfo, sizeof(SVnodeHeadInfo)); +} + +void vnodeUpdateHeadFileHeader(int fd, SVnodeHeadInfo* pHeadInfo) { + lseek(fd, TSDB_FILE_HEADER_LEN / 4, SEEK_SET); + write(fd, pHeadInfo, sizeof(SVnodeHeadInfo)); +} + +void vnodeCreateFileHeader(FILE* fp) { + char temp[TSDB_FILE_HEADER_LEN / 4]; + int lineLen; + + lineLen = sizeof(temp); + + // write the first line` + memset(temp, 0, lineLen); + *(int16_t*)temp = vnodeFileVersion; + sprintf(temp + sizeof(int16_t), "tsdb version: %s\n", version); + /* *((int16_t *)(temp + TSDB_FILE_HEADER_LEN/8)) = vnodeFileVersion; */ + fseek(fp, 0, SEEK_SET); + fwrite(temp, lineLen, 1, fp); + + // second line + memset(temp, 0, lineLen); + fwrite(temp, lineLen, 1, fp); + + // the third line is the dynamic info + memset(temp, 0, lineLen); + fwrite(temp, lineLen, 1, fp); + fwrite(temp, lineLen, 1, fp); +} + +SSqlGroupbyExpr* vnodeCreateGroupbyExpr(SQueryMeterMsg* pQueryMsg, int32_t* code) { + if (pQueryMsg->numOfGroupbyCols == 0) { + return NULL; + } + + // using group by tag columns + SSqlGroupbyExpr* pGroupbyExpr = + (SSqlGroupbyExpr*)malloc(sizeof(SSqlGroupbyExpr) + pQueryMsg->numOfGroupbyCols * sizeof(int16_t)); + if (pGroupbyExpr == NULL) { + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + return NULL; + } + + int16_t* pGroupbyIds = (int16_t*)pQueryMsg->groupbyTagIds; + + pGroupbyExpr->numOfGroupbyCols = pQueryMsg->numOfGroupbyCols; + pGroupbyExpr->orderType = pQueryMsg->orderType; + pGroupbyExpr->orderIdx = pQueryMsg->orderByIdx; + + memcpy(pGroupbyExpr->tagIndex, pGroupbyIds, sizeof(int16_t) * pGroupbyExpr->numOfGroupbyCols); + + return pGroupbyExpr; +} + +static SSchema* toSchema(SQueryMeterMsg* pQuery, SColumnFilterMsg* pCols, int32_t numOfCols) { + char* start = (char*)pQuery->colNameList; + char* end = start; + + SSchema* pSchema = calloc(1, sizeof(SSchema) * numOfCols); + for (int32_t i = 0; i < numOfCols; ++i) { + pSchema[i].type = pCols[i].type; + pSchema[i].bytes = 
pCols[i].bytes; + pSchema[i].colId = pCols[i].colId; + + end = strstr(start, ","); + memcpy(pSchema[i].name, start, end - start); + start = end + 1; + } + + return pSchema; +} + +static int32_t id_compar(const void* left, const void* right) { + DEFAULT_COMP(GET_INT16_VAL(left), GET_INT16_VAL(right)); +} + +static int32_t vnodeBuildExprFromArithmeticStr(SSqlFunctionExpr* pExpr, SQueryMeterMsg* pQueryMsg) { + SSqlBinaryExprInfo* pBinaryExprInfo = &pExpr->pBinExprInfo; + SColumnFilterMsg* pColMsg = pQueryMsg->colList; + + tSQLBinaryExpr* pBinExpr = NULL; + SSchema* pSchema = toSchema(pQueryMsg, pColMsg, pQueryMsg->numOfCols); + + dTrace("qmsg:%p create binary expr from string:%s", pQueryMsg, pExpr->pBase.arg[0].argValue.pz); + tSQLBinaryExprFromString(&pBinExpr, pSchema, pQueryMsg->numOfCols, pExpr->pBase.arg[0].argValue.pz, + pExpr->pBase.arg[0].argBytes); + + if (pBinExpr == NULL) { + dError("qmsg:%p failed to create arithmetic expression string from:%s", pQueryMsg, pExpr->pBase.arg[0].argValue.pz); + return TSDB_CODE_APP_ERROR; + } + + pBinaryExprInfo->pBinExpr = pBinExpr; + + int32_t num = 0; + int16_t ids[TSDB_MAX_COLUMNS] = {0}; + + tSQLBinaryExprTrv(pBinExpr, &num, ids); + qsort(ids, num, sizeof(int16_t), id_compar); + + int32_t i = 0, j = 0; + + while (i < num && j < num) { + if (ids[i] == ids[j]) { + j++; + } else { + ids[++i] = ids[j++]; + } + } + assert(i <= num); + + // there may be duplicated referenced columns. + num = i + 1; + pBinaryExprInfo->pReqColumns = malloc(sizeof(SColIndexEx) * num); + + for (int32_t i = 0; i < num; ++i) { + SColIndexEx* pColIndex = &pBinaryExprInfo->pReqColumns[i]; + pColIndex->colId = ids[i]; + } + + pBinaryExprInfo->numOfCols = num; + free(pSchema); + + return TSDB_CODE_SUCCESS; +} + +static int32_t getColumnIndexInSource(SQueryMeterMsg* pQueryMsg, SSqlFuncExprMsg* pExprMsg) { + int32_t j = 0; + + while(j < pQueryMsg->numOfCols) { + if (pExprMsg->colInfo.colId == pQueryMsg->colList[j].colId) { + break; + } + + j += 1; + } + + return j; +} + +bool vnodeValidateExprColumnInfo(SQueryMeterMsg* pQueryMsg, SSqlFuncExprMsg* pExprMsg) { + int32_t j = getColumnIndexInSource(pQueryMsg, pExprMsg); + return j < pQueryMsg->numOfCols; +} + +SSqlFunctionExpr* vnodeCreateSqlFunctionExpr(SQueryMeterMsg* pQueryMsg, int32_t* code) { + SSqlFunctionExpr* pExprs = (SSqlFunctionExpr*)calloc(1, sizeof(SSqlFunctionExpr) * pQueryMsg->numOfOutputCols); + if (pExprs == NULL) { + tfree(pQueryMsg->pSqlFuncExprs); + + *code = TSDB_CODE_SERV_OUT_OF_MEMORY; + return NULL; + } + + SSchema* pTagSchema = (SSchema*)pQueryMsg->pTagSchema; + for (int32_t i = 0; i < pQueryMsg->numOfOutputCols; ++i) { + pExprs[i].pBase = *((SSqlFuncExprMsg**)pQueryMsg->pSqlFuncExprs)[i]; // todo pExprs responsible for release memory + pExprs[i].resBytes = 0; + + int16_t type = 0; + int16_t bytes = 0; + + SColIndexEx* pColumnIndexExInfo = &pExprs[i].pBase.colInfo; + + // tag column schema is kept in pQueryMsg->pTagSchema + if (pColumnIndexExInfo->isTag) { + assert(pColumnIndexExInfo->colIdx < pQueryMsg->numOfTagsCols); + + type = pTagSchema[pColumnIndexExInfo->colIdx].type; + bytes = pTagSchema[pColumnIndexExInfo->colIdx].bytes; + + } else { // parse the arithmetic expression + if (pExprs[i].pBase.functionId == TSDB_FUNC_ARITHM) { + *code = vnodeBuildExprFromArithmeticStr(&pExprs[i], pQueryMsg); + + if (*code != TSDB_CODE_SUCCESS) { + tfree(pExprs); + break; + } + + type = TSDB_DATA_TYPE_DOUBLE; + bytes = tDataTypeDesc[type].nSize; + } else { // parse the normal column + int32_t j = 
getColumnIndexInSource(pQueryMsg, &pExprs[i].pBase); + assert(j < pQueryMsg->numOfCols); + + SColumnFilterMsg* pCol = &pQueryMsg->colList[j]; + type = pCol->type; + bytes = pCol->bytes; + } + } + + int32_t param = pExprs[i].pBase.arg[0].argValue.i64; + getResultInfo(type, bytes, pExprs[i].pBase.functionId, param, &pExprs[i].resType, &pExprs[i].resBytes); + + assert(pExprs[i].resType != 0 && pExprs[i].resBytes != 0); + } + + tfree(pQueryMsg->pSqlFuncExprs); + return pExprs; +} + +bool vnodeIsValidVnodeCfg(SVnodeCfg* pCfg) { + if (pCfg == NULL) return false; + + if (pCfg->maxSessions <= 0 || pCfg->cacheBlockSize <= 0 || pCfg->daysPerFile <= 0 || pCfg->daysToKeep <= 0) { + return false; + } + + return true; +} + +/** + * compare if schema of two tables are identical. + * when multi-table query is issued, the schemas of all requested tables + * should be identical. Otherwise,query process will abort. + */ +bool vnodeMeterSchemaIdentical(SColumn* pSchema1, int32_t numOfCols1, SColumn* pSchema2, int32_t numOfCols2) { + if (!VALIDNUMOFCOLS(numOfCols1) || !VALIDNUMOFCOLS(numOfCols2) || numOfCols1 != numOfCols2) { + return false; + } + + return memcmp((char*)pSchema1, (char*)pSchema2, sizeof(SColumn) * numOfCols1) == 0; +} + +void vnodeFreeFields(SQuery* pQuery) { + if (pQuery == NULL || pQuery->pFields == NULL) { + return; + } + + for (int32_t i = 0; i < pQuery->numOfBlocks; ++i) { + tfree(pQuery->pFields[i]); + } + + /* + * pQuery->pFields does not need to be released, it is allocated at the last part of pBlock + * so free(pBlock) can release this memory at the same time. + */ + pQuery->pFields = NULL; + pQuery->numOfBlocks = 0; +} + +void vnodeUpdateFilterColumnIndex(SQuery* pQuery) { + for (int32_t i = 0; i < pQuery->numOfFilterCols; ++i) { + for (int16_t j = 0; j < pQuery->numOfCols; ++j) { + if (pQuery->pFilterInfo[i].pFilter.data.colId == pQuery->colList[j].data.colId) { + pQuery->pFilterInfo[i].pFilter.colIdx = pQuery->colList[j].colIdx; + pQuery->pFilterInfo[i].pFilter.colIdxInBuf = pQuery->colList[j].colIdxInBuf; + + // supplementary scan is also require this column + pQuery->colList[j].req[1] = 1; + break; + } + } + } + + // set the column index in buffer for arithmetic operation + if (pQuery->pSelectExpr != NULL) { + for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + SSqlBinaryExprInfo* pBinExprInfo = &pQuery->pSelectExpr[i].pBinExprInfo; + if (pBinExprInfo->pBinExpr != NULL) { + for (int16_t j = 0; j < pBinExprInfo->numOfCols; ++j) { + for (int32_t k = 0; k < pQuery->numOfCols; ++k) { + if (pBinExprInfo->pReqColumns[j].colId == pQuery->colList[k].data.colId) { + pBinExprInfo->pReqColumns[j].colIdxInBuf = pQuery->colList[k].colIdxInBuf; + assert(pQuery->colList[k].colIdxInBuf == k); + break; + } + } + } + } + } + } +} + +// TODO support k<12 and k<>9 +int32_t vnodeCreateFilterInfo(SQuery* pQuery) { + for (int32_t i = 0; i < pQuery->numOfCols; ++i) { + if (pQuery->colList[i].data.filterOn > 0) { + pQuery->numOfFilterCols++; + } + } + + if (pQuery->numOfFilterCols == 0) { + return TSDB_CODE_SUCCESS; + } + + pQuery->pFilterInfo = calloc(1, sizeof(SColumnFilterInfo) * pQuery->numOfFilterCols); + + for (int32_t i = 0, j = 0; i < pQuery->numOfCols; ++i) { + if (pQuery->colList[i].data.filterOn > 0) { + pQuery->pFilterInfo[j].pFilter = pQuery->colList[i]; + SColumnFilterInfo* pFilterInfo = &pQuery->pFilterInfo[j]; + + int32_t lower = pFilterInfo->pFilter.data.lowerRelOptr; + int32_t upper = pFilterInfo->pFilter.data.upperRelOptr; + + int16_t type = pQuery->colList[i].data.type; + 
int16_t bytes = pQuery->colList[i].data.bytes; + + __filter_func_t* rangeFilterArray = vnodeGetRangeFilterFuncArray(type); + __filter_func_t* filterArray = vnodeGetValueFilterFuncArray(type); + + if (rangeFilterArray == NULL && filterArray == NULL) { + dError("QInfo:%p failed to get filter function, invalid data type:%d", type); + return TSDB_CODE_APP_ERROR; + } + + if ((lower == TSDB_RELATION_LARGE_EQUAL || lower == TSDB_RELATION_LARGE) && + (upper == TSDB_RELATION_LESS_EQUAL || upper == TSDB_RELATION_LESS)) { + if (lower == TSDB_RELATION_LARGE_EQUAL) { + if (upper == TSDB_RELATION_LESS_EQUAL) { + pFilterInfo->fp = rangeFilterArray[4]; + } else { + pFilterInfo->fp = rangeFilterArray[2]; + } + } else { + assert(lower == TSDB_RELATION_LARGE); + + if (upper == TSDB_RELATION_LESS_EQUAL) { + pFilterInfo->fp = rangeFilterArray[3]; + } else { + pFilterInfo->fp = rangeFilterArray[1]; + } + } + } else { // set callback filter function + if (lower != 0) { + pFilterInfo->fp = filterArray[lower]; + assert(upper == 0); + } else { + pFilterInfo->fp = filterArray[upper]; + } + } + pFilterInfo->elemSize = bytes; + j++; + } + } + + return TSDB_CODE_SUCCESS; +} + +bool vnodeDoFilterData(SQuery* pQuery, int32_t elemPos) { + for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { + SColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; + char* pElem = pFilterInfo->pData + pFilterInfo->elemSize * elemPos; + + if(isNull(pElem, pFilterInfo->pFilter.data.type)) { + return false; + } + + if (!pFilterInfo->fp(&pFilterInfo->pFilter, pElem, pElem)) { + return false; + } + } + + return true; +} + +bool vnodeFilterData(SQuery* pQuery, int32_t* numOfActualRead, int32_t index) { + (*numOfActualRead)++; + if (!vnodeDoFilterData(pQuery, index)) { + return false; + } + + if (pQuery->limit.offset > 0) { + pQuery->limit.offset--; // ignore this qualified row + return false; + } + + return true; +} + +bool vnodeIsProjectionQuery(SSqlFunctionExpr* pExpr, int32_t numOfOutput) { + for (int32_t i = 0; i < numOfOutput; ++i) { + if (pExpr[i].pBase.functionId != TSDB_FUNC_PRJ) { + return false; + } + } + + return true; +} + +int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSids, SMeterObj** pMeterObjList, + int32_t* numOfInc) { + SVnodeObj* pVnode = &vnodeList[pQueryMsg->vnode]; + + int32_t num = 0; + int32_t code = TSDB_CODE_SUCCESS; + + // check all meter metadata to ensure all metadata are identical. 
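The loop below walks every requested meter: meters that are missing or not in the READY state record an error code, while READY meters are added to the output list and have their query reference count bumped with a GCC atomic builtin, so no lock is needed on this path. A minimal sketch of that acquire step (acquireMeter and the bare counter are illustrative):

/* __sync_fetch_and_add returns the value before the increment */
static int acquireMeter(int *numOfQueries) {
  return __sync_fetch_and_add(numOfQueries, 1) + 1;  /* count after this query joined */
}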
+ for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { + SMeterObj* pMeter = pVnode->meterList[pSids[i]->sid]; + + if (pMeter == NULL || pMeter->state != TSDB_METER_STATE_READY) { + if (pMeter == NULL) { + code = TSDB_CODE_NOT_ACTIVE_SESSION; + dError("qmsg:%p, vid:%d sid:%d, not there", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid); + vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); + } else { + code = TSDB_CODE_ACTION_IN_PROGRESS; + dTrace("qmsg:%p, vid:%d sid:%d id:%s, it is in state:%d, wait!", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid, + pMeter->meterId, pMeter->state); + } + } else { + pMeterObjList[(*numOfInc)++] = pMeter; + __sync_fetch_and_add(&pMeter->numOfQueries, 1); + + // output for meter more than one query executed + if (pMeter->numOfQueries > 1) { + dTrace("qmsg:%p, vid:%d sid:%d id:%s, inc query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, + pMeter->meterId, pMeter->numOfQueries); + num++; + } + } + } + + dTrace("qmsg:%p, query meters: %d, inc query ref %d, numOfQueries on %d meters are 1", pQueryMsg, + pQueryMsg->numOfSids, *numOfInc, (*numOfInc) - num); + + return code; +} + +void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, int32_t numOfInc) { + int32_t num = 0; + + for (int32_t i = 0; i < numOfInc; ++i) { + SMeterObj* pMeter = pMeterObjList[i]; + + if (pMeter != NULL) { // here, do not need to lock to perform operations + assert(pMeter->state != TSDB_METER_STATE_DELETING && pMeter->state != TSDB_METER_STATE_DELETED); + __sync_fetch_and_sub(&pMeter->numOfQueries, 1); + + if (pMeter->numOfQueries > 0) { + dTrace("qmsg:%p, vid:%d sid:%d id:%s dec query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid, + pMeter->meterId, pMeter->numOfQueries); + num++; + } + } + } + + dTrace("qmsg:%p, dec query ref for %d meters, numOfQueries on %d meters are 0", pQueryMsg, numOfInc, numOfInc - num); +} + +void vnodeUpdateQueryColumnIndex(SQuery* pQuery, SMeterObj* pMeterObj) { + if (pQuery == NULL || pMeterObj == NULL) { + return; + } + + int32_t i = 0, j = 0; + while (i < pQuery->numOfCols && j < pMeterObj->numOfColumns) { + if (pQuery->colList[i].data.colId == pMeterObj->schema[j].colId) { + pQuery->colList[i++].colIdx = (int16_t)j++; + } else if (pQuery->colList[i].data.colId < pMeterObj->schema[j].colId) { + pQuery->colList[i++].colIdx = -1; + } else if (pQuery->colList[i].data.colId > pMeterObj->schema[j].colId) { + j++; + } + } + + while (i < pQuery->numOfCols) { + pQuery->colList[i++].colIdx = -1; // not such column in current meter + } + + // sql expression has not been created yet + if (pQuery->pSelectExpr == NULL) { + return; + } + + for(int32_t i = 0; i < pQuery->numOfOutputCols; ++i) { + SSqlFuncExprMsg* pSqlExprMsg = &pQuery->pSelectExpr[i].pBase; + if (pSqlExprMsg->functionId == TSDB_FUNC_ARITHM || pSqlExprMsg->colInfo.isTag == true) { + continue; + } + + SColIndexEx* pColIndexEx = &pSqlExprMsg->colInfo; + for(int32_t j = 0; j < pQuery->numOfCols; ++j) { + if (pColIndexEx->colId == pQuery->colList[j].data.colId) { + pColIndexEx->colIdx = pQuery->colList[j].colIdx; + break; + } + } + } +} diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt new file mode 100755 index 000000000000..a19060ee5240 --- /dev/null +++ b/src/util/CMakeLists.txt @@ -0,0 +1,28 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) + +PROJECT(TDengine) + +AUX_SOURCE_DIRECTORY(./src SRC) +INCLUDE_DIRECTORIES(${PRJ_HEADER_PATH} ./inc) + +ADD_LIBRARY(tutil ${SRC}) +TARGET_LINK_LIBRARIES(tutil m pthread) + +FIND_PATH(ICONV_INCLUDE_EXIST iconv.h 
/usr/include/ /usr/local/include/) +IF (ICONV_INCLUDE_EXIST) + ADD_DEFINITIONS(-DUSE_LIBICONV) + + FIND_PATH(ICONV_LIBRARY_A_EXIST libiconv.a /usr/lib/ /usr/local/lib/ /lib64) + FIND_PATH(ICONV_LIBRARY_SO_EXIST libiconv.so /usr/lib/ /usr/local/lib/ /lib64) + IF (ICONV_LIBRARY_A_EXIST OR ICONV_LIBRARY_SO_EXIST) + MESSAGE(STATUS "Use the installed libiconv library") + TARGET_LINK_LIBRARIES(tutil iconv) + ELSE () + # libiconv library is already included in GLIBC, + MESSAGE(STATUS "Use the iconv functions in GLIBC") + ENDIF () +ELSE () + MESSAGE(STATUS "Failed to find iconv, use default encoding method") +ENDIF () + + diff --git a/src/util/src/ihash.c b/src/util/src/ihash.c new file mode 100644 index 000000000000..88f4a7b47dc5 --- /dev/null +++ b/src/util/src/ihash.c @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include + +typedef struct _str_node_t { + int32_t key; + struct _str_node_t *prev; + struct _str_node_t *next; + char data[]; +} IHashNode; + +typedef struct { + IHashNode **hashList; + int32_t maxSessions; + int32_t dataSize; + int32_t (*hashFp)(void *, int32_t key); + pthread_mutex_t mutex; +} IHashObj; + +int32_t taosHashInt(void *handle, int32_t key) { + IHashObj *pObj = (IHashObj *)handle; + int32_t hash = 0; + hash = key % pObj->maxSessions; + return hash; +} + +char *taosAddIntHash(void *handle, int32_t key, char *pData) { + int32_t hash; + IHashNode *pNode; + IHashObj * pObj; + + pObj = (IHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + hash = (*pObj->hashFp)(pObj, key); + + pthread_mutex_lock(&pObj->mutex); + + pNode = (IHashNode *)malloc(sizeof(IHashNode) + (size_t)pObj->dataSize); + pNode->key = key; + if (pData != NULL) { + memcpy(pNode->data, pData, (size_t)pObj->dataSize); + } + pNode->prev = 0; + pNode->next = pObj->hashList[hash]; + + if (pObj->hashList[hash] != 0) (pObj->hashList[hash])->prev = pNode; + pObj->hashList[hash] = pNode; + + pthread_mutex_unlock(&pObj->mutex); + + return (char *)pNode->data; +} + +void taosDeleteIntHash(void *handle, int32_t key) { + int32_t hash; + IHashNode *pNode; + IHashObj * pObj; + + pObj = (IHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + hash = (*(pObj->hashFp))(pObj, key); + + pthread_mutex_lock(&pObj->mutex); + + pNode = pObj->hashList[hash]; + while (pNode) { + if (pNode->key == key) break; + + pNode = pNode->next; + } + + if (pNode) { + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pObj->hashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + free(pNode); + } + + pthread_mutex_unlock(&pObj->mutex); +} + +char *taosGetIntHashData(void *handle, int32_t key) { + int32_t hash; + IHashNode *pNode; + IHashObj * pObj; + + pObj = (IHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + hash = (*pObj->hashFp)(pObj, key); + + pthread_mutex_lock(&pObj->mutex); + + pNode = 
pObj->hashList[hash]; + + while (pNode) { + if (pNode->key == key) { + break; + } + + pNode = pNode->next; + } + + pthread_mutex_unlock(&pObj->mutex); + + if (pNode) return pNode->data; + + return NULL; +} + +void *taosInitIntHash(int32_t maxSessions, int32_t dataSize, int32_t (*fp)(void *, int32_t)) { + IHashObj *pObj; + + pObj = (IHashObj *)malloc(sizeof(IHashObj)); + if (pObj == NULL) { + return NULL; + } + + memset(pObj, 0, sizeof(IHashObj)); + pObj->maxSessions = maxSessions; + pObj->dataSize = dataSize; + pObj->hashFp = fp; + + pObj->hashList = (IHashNode **)malloc(sizeof(IHashNode *) * (size_t)maxSessions); + if (pObj->hashList == NULL) { + free(pObj); + return NULL; + } + memset(pObj->hashList, 0, sizeof(IHashNode *) * (size_t)maxSessions); + + pthread_mutex_init(&pObj->mutex, NULL); + + return pObj; +} + +void taosCleanUpIntHash(void *handle) { + IHashObj * pObj; + IHashNode *pNode, *pNext; + + pObj = (IHashObj *)handle; + if (pObj == NULL || pObj->maxSessions <= 0) return; + + pthread_mutex_lock(&pObj->mutex); + + if (pObj->hashList) { + for (int32_t i = 0; i < pObj->maxSessions; ++i) { + pNode = pObj->hashList[i]; + while (pNode) { + pNext = pNode->next; + free(pNode); + pNode = pNext; + } + } + + free(pObj->hashList); + } + + pthread_mutex_unlock(&pObj->mutex); + + pthread_mutex_destroy(&pObj->mutex); + + memset(pObj, 0, sizeof(IHashObj)); + free(pObj); +} diff --git a/src/util/src/lz4.c b/src/util/src/lz4.c new file mode 100644 index 000000000000..6a40456f2f0f --- /dev/null +++ b/src/util/src/lz4.c @@ -0,0 +1,1703 @@ +/* + LZ4 - Fast LZ compression algorithm + Copyright (C) 2011-2017, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ + +/*-************************************ +* Tuning parameters +**************************************/ +/* + * LZ4_HEAPMODE : + * Select how default compression functions will allocate memory for their hash + * table, + * in memory stack (0:default, fastest), or in memory heap (1:requires + * malloc()). 
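The integer hash table in ihash.c above stores a fixed-size payload copy per int32 key and ships taosHashInt as its default modulo hash. A usage sketch, assuming the prototypes below match the definitions in ihash.c (the extern declarations and the Session payload are illustrative, not taken from a header in the patch):

#include <stdint.h>
#include <stdio.h>

extern void   *taosInitIntHash(int32_t maxSessions, int32_t dataSize, int32_t (*fp)(void *, int32_t));
extern int32_t taosHashInt(void *handle, int32_t key);
extern char   *taosAddIntHash(void *handle, int32_t key, char *pData);
extern char   *taosGetIntHashData(void *handle, int32_t key);
extern void    taosDeleteIntHash(void *handle, int32_t key);
extern void    taosCleanUpIntHash(void *handle);

typedef struct { int32_t vnode; int32_t sid; } Session;   /* illustrative payload */

void ihashExample(void) {
  void *h = taosInitIntHash(1024, (int32_t)sizeof(Session), taosHashInt);
  if (h == NULL) return;

  Session s = {1, 42};
  taosAddIntHash(h, 42, (char *)&s);                       /* payload bytes are copied in */

  Session *found = (Session *)taosGetIntHashData(h, 42);
  if (found != NULL) printf("vnode=%d sid=%d\n", (int)found->vnode, (int)found->sid);

  taosDeleteIntHash(h, 42);                                /* frees the node; 'found' is dangling now */
  taosCleanUpIntHash(h);
}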
+ */ +#ifndef LZ4_HEAPMODE +#define LZ4_HEAPMODE 0 +#endif + +/* + * ACCELERATION_DEFAULT : + * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0 + */ +#define ACCELERATION_DEFAULT 1 + +/*-************************************ +* CPU Feature Detection +**************************************/ +/* LZ4_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is + * safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly + * is sub-optimal. + * The below switch allow to select different access method for improved + * performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not + * portable). + * This method is safe if your compiler supports it, and *generally* + * as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets which assembly generation + * depends on alignment. + * But in some circumstances, it's the only known way to get the most + * performance (ie GCC + ARMv6) + * See + * https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html + * for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */ +#if defined(__GNUC__) && \ + (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ + defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ + defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)) +#define LZ4_FORCE_MEMORY_ACCESS 2 +#elif defined(__INTEL_COMPILER) || defined(__GNUC__) +#define LZ4_FORCE_MEMORY_ACCESS 1 +#endif +#endif + +/* + * LZ4_FORCE_SW_BITCOUNT + * Define this parameter if your target system or compiler does not support + * hardware bit count + */ +#if defined(_MSC_VER) && \ + defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support \ + Hardware bit count */ +#define LZ4_FORCE_SW_BITCOUNT +#endif + +/*-************************************ +* Dependency +**************************************/ +#include "lz4.h" +/* see also "memory routines" below */ + +/*-************************************ +* Compiler Options +**************************************/ +#ifdef _MSC_VER /* Visual Studio */ +#include +#pragma warning( \ + disable : 4127) /* disable: C4127: conditional expression is constant */ +#pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) \ + */ +#endif /* _MSC_VER */ + +#ifndef LZ4_FORCE_INLINE +#ifdef _MSC_VER /* Visual Studio */ +#define LZ4_FORCE_INLINE static __forceinline +#else +#if defined(__cplusplus) || \ + defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +#ifdef __GNUC__ +#define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) +#else +#define LZ4_FORCE_INLINE static inline +#endif +#else +#define LZ4_FORCE_INLINE static +#endif /* __STDC_VERSION__ */ +#endif /* _MSC_VER */ +#endif /* LZ4_FORCE_INLINE */ + +#if (defined(__GNUC__) && (__GNUC__ >= 3)) || \ + (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \ + defined(__clang__) +#define expect(expr, value) (__builtin_expect((expr), (value))) +#else +#define expect(expr, value) (expr) +#endif + +#define likely(expr) expect((expr) != 0, 1) +#define unlikely(expr) expect((expr) != 0, 0) + +/*-************************************ +* Memory routines +**************************************/ +#include /* malloc, calloc, free */ +#define 
ALLOCATOR(n, s) calloc(n, s) +#define FREEMEM free +#include /* memset, memcpy */ +#define MEM_INIT memset + +/*-************************************ +* Basic Types +**************************************/ +#if defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +#include +typedef uint8_t BYTE; +typedef uint16_t U16; +typedef uint32_t U32; +typedef int32_t S32; +typedef uint64_t U64; +typedef uintptr_t uptrval; +#else +typedef unsigned char BYTE; +typedef unsigned short U16; +typedef unsigned int U32; +typedef signed int S32; +typedef unsigned long long U64; +typedef size_t uptrval; /* generally true, except OpenVMS-64 */ +#endif + +#if defined(__x86_64__) +typedef U64 reg_t; /* 64-bits in x32 mode */ +#else +typedef size_t reg_t; /* 32-bits in x32 mode */ +#endif + +/*-************************************ +* Reading and writing into memory +**************************************/ +static unsigned LZ4_isLittleEndian(void) { + const union { + U32 u; + BYTE c[4]; + } one = {1}; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2) +/* lie to the compiler about data alignment; use with caution */ + +static U16 LZ4_read16(const void* memPtr) { return *(const U16*)memPtr; } +static U32 LZ4_read32(const void* memPtr) { return *(const U32*)memPtr; } +static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*)memPtr; } + +static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } +static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } + +#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1) + +/* __pack instructions are safer, but compiler specific, hence potentially + * problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { + U16 u16; + U32 u32; + reg_t uArch; +} __attribute__((packed)) unalign; + +static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } +static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +static reg_t LZ4_read_ARCH(const void* ptr) { + return ((const unalign*)ptr)->uArch; +} + +static void LZ4_write16(void* memPtr, U16 value) { + ((unalign*)memPtr)->u16 = value; +} +static void LZ4_write32(void* memPtr, U32 value) { + ((unalign*)memPtr)->u32 = value; +} + +#else /* safe and portable access through memcpy() */ + +static U16 LZ4_read16(const void* memPtr) { + U16 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +static U32 LZ4_read32(const void* memPtr) { + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +static reg_t LZ4_read_ARCH(const void* memPtr) { + reg_t val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +static void LZ4_write16(void* memPtr, U16 value) { + memcpy(memPtr, &value, sizeof(value)); +} + +static void LZ4_write32(void* memPtr, U32 value) { + memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* LZ4_FORCE_MEMORY_ACCESS */ + +static U16 LZ4_readLE16(const void* memPtr) { + if (LZ4_isLittleEndian()) { + return LZ4_read16(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + return (U16)((U16)p[0] + (p[1] << 8)); + } +} + +static void LZ4_writeLE16(void* memPtr, U16 value) { + if (LZ4_isLittleEndian()) { + LZ4_write16(memPtr, value); + } else { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE)value; + p[1] = (BYTE)(value >> 8); + } +} + +static void LZ4_copy8(void* dst, const void* src) { memcpy(dst, src, 8); } + +/* customized variant 
of memcpy, which can overwrite up to 8 bytes beyond dstEnd + */ +static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) { + BYTE* d = (BYTE*)dstPtr; + const BYTE* s = (const BYTE*)srcPtr; + BYTE* const e = (BYTE*)dstEnd; + + do { + LZ4_copy8(d, s); + d += 8; + s += 8; + } while (d < e); +} + +/*-************************************ +* Common Constants +**************************************/ +#define MINMATCH 4 + +#define WILDCOPYLENGTH 8 +#define LASTLITERALS 5 +#define MFLIMIT (WILDCOPYLENGTH + MINMATCH) +static const int LZ4_minLength = (MFLIMIT + 1); + +#define KB *(1 << 10) +#define MB *(1 << 20) +#define GB *(1U << 30) + +#define MAXD_LOG 16 +#define MAX_DISTANCE ((1 << MAXD_LOG) - 1) + +#define ML_BITS 4 +#define ML_MASK ((1U << ML_BITS) - 1) +#define RUN_BITS (8 - ML_BITS) +#define RUN_MASK ((1U << RUN_BITS) - 1) + +/*-************************************ +* Error detection +**************************************/ +#define LZ4_STATIC_ASSERT(c) \ + { \ + enum { LZ4_static_assert = 1 / (int)(!!(c)) }; \ + } /* use only *after* variable declarations */ + +#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2) +#include +#define DEBUGLOG(l, ...) \ + { \ + if (l <= LZ4_DEBUG) { \ + fprintf(stderr, __FILE__ ": "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, " \n"); \ + } \ + } +#else +#define DEBUGLOG(l, ...) \ + {} /* disabled */ +#endif + +/*-************************************ +* Common functions +**************************************/ +static unsigned LZ4_NbCommonBytes(register reg_t val) { + if (LZ4_isLittleEndian()) { + if (sizeof(val) == 8) { +#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64(&r, (U64)val); + return (int)(r >> 3); +#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctzll((U64)val) >> 3); +#else + static const int DeBruijnBytePos[64] = { + 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, + 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, + 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7}; + return DeBruijnBytePos[((U64)((val & -(long long)val) * + 0x0218A392CDABBD3FULL)) >> + 58]; +#endif + } else /* 32 bits */ { +#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward(&r, (U32)val); + return (int)(r >> 3); +#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctz((U32)val) >> 3); +#else + static const int DeBruijnBytePos[32] = {0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, + 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, + 2, 0, 3, 1, 2, 0, 1, 0, 1, 1}; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +#endif + } + } else /* Big Endian CPU */ { + if (sizeof(val) == 8) { +#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse64(&r, val); + return (unsigned)(r >> 3); +#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clzll((U64)val) >> 3); +#else + unsigned r; + if (!(val >> 32)) { + r = 4; + } else { + r = 0; + val >>= 32; + } + if (!(val >> 16)) { + r += 2; + val >>= 8; + } else { + val >>= 24; + } + r += (!val); + return r; +#endif + } else /* 32 bits */ { +#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse(&r, (unsigned long)val); + return (unsigned)(r >> 3); 
+#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && \ + !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clz((U32)val) >> 3); +#else + unsigned r; + if (!(val >> 16)) { + r = 2; + val >>= 8; + } else { + r = 0; + val >>= 24; + } + r += (!val); + return r; +#endif + } + } +} + +#define STEPSIZE sizeof(reg_t) +static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, + const BYTE* pInLimit) { + const BYTE* const pStart = pIn; + + while (likely(pIn < pInLimit - (STEPSIZE - 1))) { + reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + if (!diff) { + pIn += STEPSIZE; + pMatch += STEPSIZE; + continue; + } + pIn += LZ4_NbCommonBytes(diff); + return (unsigned)(pIn - pStart); + } + + if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) && + (LZ4_read32(pMatch) == LZ4_read32(pIn))) { + pIn += 4; + pMatch += 4; + } + if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { + pIn += 2; + pMatch += 2; + } + if ((pIn < pInLimit) && (*pMatch == *pIn)) pIn++; + return (unsigned)(pIn - pStart); +} + +#ifndef LZ4_COMMONDEFS_ONLY +/*-************************************ +* Local Constants +**************************************/ +static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1)); +static const U32 + LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on + incompressible data */ + +/*-************************************ +* Local Structures and types +**************************************/ +typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive; +typedef enum { byPtr, byU32, byU16 } tableType_t; + +typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; +typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; + +typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; +typedef enum { full = 0, partial = 1 } earlyEnd_directive; + +/*-************************************ +* Local Utils +**************************************/ +int LZ4_versionNumber(void) { return LZ4_VERSION_NUMBER; } +const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } +int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } +int LZ4_sizeofState() { return LZ4_STREAMSIZE; } + +/*-****************************** +* Compression functions +********************************/ +static U32 LZ4_hash4(U32 sequence, tableType_t const tableType) { + if (tableType == byU16) + return ((sequence * 2654435761U) >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1))); + else + return ((sequence * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG)); +} + +static U32 LZ4_hash5(U64 sequence, tableType_t const tableType) { + static const U64 prime5bytes = 889523592379ULL; + static const U64 prime8bytes = 11400714785074694791ULL; + const U32 hashLog = (tableType == byU16) ? 
LZ4_HASHLOG + 1 : LZ4_HASHLOG; + if (LZ4_isLittleEndian()) + return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); + else + return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); +} + +LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, + tableType_t const tableType) { + if ((sizeof(reg_t) == 8) && (tableType != byU16)) + return LZ4_hash5(LZ4_read_ARCH(p), tableType); + return LZ4_hash4(LZ4_read32(p), tableType); +} + +static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, + tableType_t const tableType, + const BYTE* srcBase) { + switch (tableType) { + case byPtr: { + const BYTE** hashTable = (const BYTE**)tableBase; + hashTable[h] = p; + return; + } + case byU32: { + U32* hashTable = (U32*)tableBase; + hashTable[h] = (U32)(p - srcBase); + return; + } + case byU16: { + U16* hashTable = (U16*)tableBase; + hashTable[h] = (U16)(p - srcBase); + return; + } + } +} + +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, + tableType_t tableType, + const BYTE* srcBase) { + U32 const h = LZ4_hashPosition(p, tableType); + LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, + tableType_t tableType, + const BYTE* srcBase) { + if (tableType == byPtr) { + const BYTE** hashTable = (const BYTE**)tableBase; + return hashTable[h]; + } + if (tableType == byU32) { + const U32* const hashTable = (U32*)tableBase; + return hashTable[h] + srcBase; + } + { + const U16* const hashTable = (U16*)tableBase; + return hashTable[h] + srcBase; + } /* default, to ensure a return */ +} + +LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, + tableType_t tableType, + const BYTE* srcBase) { + U32 const h = LZ4_hashPosition(p, tableType); + return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); +} + +/** LZ4_compress_generic() : + inlined, to ensure branches are decided at compilation time */ +LZ4_FORCE_INLINE int LZ4_compress_generic( + LZ4_stream_t_internal* const cctx, const char* const source, + char* const dest, const int inputSize, const int maxOutputSize, + const limitedOutput_directive outputLimited, const tableType_t tableType, + const dict_directive dict, const dictIssue_directive dictIssue, + const U32 acceleration) { + const BYTE* ip = (const BYTE*)source; + const BYTE* base; + const BYTE* lowLimit; + const BYTE* const lowRefLimit = ip - cctx->dictSize; + const BYTE* const dictionary = cctx->dictionary; + const BYTE* const dictEnd = dictionary + cctx->dictSize; + const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source; + const BYTE* anchor = (const BYTE*)source; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + + BYTE* op = (BYTE*)dest; + BYTE* const olimit = op + maxOutputSize; + + U32 forwardH; + + /* Init conditions */ + if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) + return 0; /* Unsupported inputSize, too large (or negative) */ + switch (dict) { + case noDict: + default: + base = (const BYTE*)source; + lowLimit = (const BYTE*)source; + break; + case withPrefix64k: + base = (const BYTE*)source - cctx->currentOffset; + lowLimit = (const BYTE*)source - cctx->dictSize; + break; + case usingExtDict: + base = (const BYTE*)source - cctx->currentOffset; + lowLimit = (const BYTE*)source; + break; + } + if ((tableType == byU16) && (inputSize >= LZ4_64Klimit)) + return 0; /* Size too large (not within 64K limit) */ + if (inputSize < LZ4_minLength) + 
goto _last_literals; /* Input too small, no compression (all literals) */ + + /* First Byte */ + LZ4_putPosition(ip, cctx->hashTable, tableType, base); + ip++; + forwardH = LZ4_hashPosition(ip, tableType); + + /* Main Loop */ + for (;;) { + ptrdiff_t refDelta = 0; + const BYTE* match; + BYTE* token; + + /* Find a match */ + { + const BYTE* forwardIp = ip; + unsigned step = 1; + unsigned searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimit)) goto _last_literals; + + match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base); + if (dict == usingExtDict) { + if (match < (const BYTE*)source) { + refDelta = dictDelta; + lowLimit = dictionary; + } else { + refDelta = 0; + lowLimit = (const BYTE*)source; + } + } + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); + + } while (((dictIssue == dictSmall) ? (match < lowRefLimit) : 0) || + ((tableType == byU16) ? 0 : (match + MAX_DISTANCE < ip)) || + (LZ4_read32(match + refDelta) != LZ4_read32(ip))); + } + + /* Catch up */ + while (((ip > anchor) & (match + refDelta > lowLimit)) && + (unlikely(ip[-1] == match[refDelta - 1]))) { + ip--; + match--; + } + + /* Encode Literals */ + { + unsigned const litLength = (unsigned)(ip - anchor); + token = op++; + if ((outputLimited) && /* Check output buffer overflow */ + (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + + (litLength / 255) > + olimit))) + return 0; + if (litLength >= RUN_MASK) { + int len = (int)litLength - RUN_MASK; + *token = (RUN_MASK << ML_BITS); + for (; len >= 255; len -= 255) *op++ = 255; + *op++ = (BYTE)len; + } else + *token = (BYTE)(litLength << ML_BITS); + + /* Copy Literals */ + LZ4_wildCopy(op, anchor, op + litLength); + op += litLength; + } + + _next_match: + /* Encode Offset */ + LZ4_writeLE16(op, (U16)(ip - match)); + op += 2; + + /* Encode MatchLength */ + { + unsigned matchCode; + + if ((dict == usingExtDict) && (lowLimit == dictionary)) { + const BYTE* limit; + match += refDelta; + limit = ip + (dictEnd - match); + if (limit > matchlimit) limit = matchlimit; + matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, limit); + ip += MINMATCH + matchCode; + if (ip == limit) { + unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit); + matchCode += more; + ip += more; + } + } else { + matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit); + ip += MINMATCH + matchCode; + } + + if (outputLimited && /* Check output buffer overflow */ + (unlikely(op + (1 + LASTLITERALS) + (matchCode >> 8) > olimit))) + return 0; + if (matchCode >= ML_MASK) { + *token += ML_MASK; + matchCode -= ML_MASK; + LZ4_write32(op, 0xFFFFFFFF); + while (matchCode >= 4 * 255) { + op += 4; + LZ4_write32(op, 0xFFFFFFFF); + matchCode -= 4 * 255; + } + op += matchCode / 255; + *op++ = (BYTE)(matchCode % 255); + } else + *token += (BYTE)(matchCode); + } + + anchor = ip; + + /* Test end of chunk */ + if (ip > mflimit) break; + + /* Fill table */ + LZ4_putPosition(ip - 2, cctx->hashTable, tableType, base); + + /* Test next position */ + match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); + if (dict == usingExtDict) { + if (match < (const BYTE*)source) { + refDelta = dictDelta; + lowLimit = dictionary; + } else { + refDelta = 0; + lowLimit = (const BYTE*)source; + } + } + LZ4_putPosition(ip, cctx->hashTable, tableType, base); + if (((dictIssue == dictSmall) ? 
(match >= lowRefLimit) : 1) && + (match + MAX_DISTANCE >= ip) && + (LZ4_read32(match + refDelta) == LZ4_read32(ip))) { + token = op++; + *token = 0; + goto _next_match; + } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + } + +_last_literals: + /* Encode Last Literals */ + { + size_t const lastRun = (size_t)(iend - anchor); + if ((outputLimited) && /* Check output buffer overflow */ + ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > + (U32)maxOutputSize)) + return 0; + if (lastRun >= RUN_MASK) { + size_t accumulator = lastRun - RUN_MASK; + *op++ = RUN_MASK << ML_BITS; + for (; accumulator >= 255; accumulator -= 255) *op++ = 255; + *op++ = (BYTE)accumulator; + } else { + *op++ = (BYTE)(lastRun << ML_BITS); + } + memcpy(op, anchor, lastRun); + op += lastRun; + } + + /* End */ + return (int)(((char*)op) - dest); +} + +int LZ4_compress_fast_extState(void* state, const char* source, char* dest, + int inputSize, int maxOutputSize, + int acceleration) { + LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; + LZ4_resetStream((LZ4_stream_t*)state); + if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; + + if (maxOutputSize >= LZ4_compressBound(inputSize)) { + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, + byU16, noDict, noDictIssue, acceleration); + else + return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, + (sizeof(void*) == 8) ? byU32 : byPtr, noDict, + noDictIssue, acceleration); + } else { + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, + limitedOutput, byU16, noDict, noDictIssue, + acceleration); + else + return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, + limitedOutput, + (sizeof(void*) == 8) ? byU32 : byPtr, noDict, + noDictIssue, acceleration); + } +} + +int LZ4_compress_fast(const char* source, char* dest, int inputSize, + int maxOutputSize, int acceleration) { +#if (LZ4_HEAPMODE) + void* ctxPtr = ALLOCATOR( + 1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ +#else + LZ4_stream_t ctx; + void* const ctxPtr = &ctx; +#endif + + int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, + maxOutputSize, acceleration); + +#if (LZ4_HEAPMODE) + FREEMEM(ctxPtr); +#endif + return result; +} + +int LZ4_compress_default(const char* source, char* dest, int inputSize, + int maxOutputSize) { + return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1); +} + +/* hidden debug function */ +/* strangely enough, gcc generates faster code when this function is + * uncommented, even if unused */ +int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, + int maxOutputSize, int acceleration) { + LZ4_stream_t ctx; + LZ4_resetStream(&ctx); + + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, + maxOutputSize, limitedOutput, byU16, noDict, + noDictIssue, acceleration); + else + return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, + maxOutputSize, limitedOutput, + sizeof(void*) == 8 ? 
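
A minimal round-trip sketch of the one-shot API above (illustrative only, not part of the patch; it assumes the bundled lz4.h header is on the include path and uses LZ4_decompress_safe(), defined further below):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

int lz4_roundtrip_example(void) {
  const char src[] = "TDengine bundles LZ4 for block compression.";
  const int  srcSize = (int)sizeof(src);

  /* LZ4_compressBound() returns the worst-case compressed size for srcSize bytes. */
  int   dstCapacity = LZ4_compressBound(srcSize);
  char *compressed  = (char *)malloc((size_t)dstCapacity);
  char *restored    = (char *)malloc((size_t)srcSize);
  if (compressed == NULL || restored == NULL) return -1;

  int cmpSize = LZ4_compress_default(src, compressed, srcSize, dstCapacity);
  if (cmpSize <= 0) return -1;                     /* 0 means compression failed */

  int decSize = LZ4_decompress_safe(compressed, restored, cmpSize, srcSize);
  if (decSize != srcSize || memcmp(src, restored, (size_t)srcSize) != 0) return -1;

  printf("%d -> %d -> %d bytes\n", srcSize, cmpSize, decSize);
  free(compressed);
  free(restored);
  return 0;
}
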
byU32 : byPtr, noDict, + noDictIssue, acceleration); +} + +/*-****************************** +* *_destSize() variant +********************************/ + +static int LZ4_compress_destSize_generic(LZ4_stream_t_internal* const ctx, + const char* const src, char* const dst, + int* const srcSizePtr, + const int targetDstSize, + const tableType_t tableType) { + const BYTE* ip = (const BYTE*)src; + const BYTE* base = (const BYTE*)src; + const BYTE* lowLimit = (const BYTE*)src; + const BYTE* anchor = ip; + const BYTE* const iend = ip + *srcSizePtr; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + targetDstSize; + BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - + 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */; + BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */); + BYTE* const oMaxSeq = oMaxLit - 1 /* token */; + + U32 forwardH; + + /* Init conditions */ + if (targetDstSize < 1) return 0; /* Impossible to store anything */ + if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) + return 0; /* Unsupported input size, too large (or negative) */ + if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit)) + return 0; /* Size too large (not within 64K limit) */ + if (*srcSizePtr < LZ4_minLength) + goto _last_literals; /* Input too small, no compression (all literals) */ + + /* First Byte */ + *srcSizePtr = 0; + LZ4_putPosition(ip, ctx->hashTable, tableType, base); + ip++; + forwardH = LZ4_hashPosition(ip, tableType); + + /* Main Loop */ + for (;;) { + const BYTE* match; + BYTE* token; + + /* Find a match */ + { + const BYTE* forwardIp = ip; + unsigned step = 1; + unsigned searchMatchNb = 1 << LZ4_skipTrigger; + + do { + U32 h = forwardH; + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimit)) goto _last_literals; + + match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base); + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base); + + } while (((tableType == byU16) ? 
0 : (match + MAX_DISTANCE < ip)) || + (LZ4_read32(match) != LZ4_read32(ip))); + } + + /* Catch up */ + while ((ip > anchor) && (match > lowLimit) && + (unlikely(ip[-1] == match[-1]))) { + ip--; + match--; + } + + /* Encode Literal length */ + { + unsigned litLength = (unsigned)(ip - anchor); + token = op++; + if (op + ((litLength + 240) / 255) + litLength > oMaxLit) { + /* Not enough space for a last match */ + op--; + goto _last_literals; + } + if (litLength >= RUN_MASK) { + unsigned len = litLength - RUN_MASK; + *token = (RUN_MASK << ML_BITS); + for (; len >= 255; len -= 255) *op++ = 255; + *op++ = (BYTE)len; + } else + *token = (BYTE)(litLength << ML_BITS); + + /* Copy Literals */ + LZ4_wildCopy(op, anchor, op + litLength); + op += litLength; + } + + _next_match: + /* Encode Offset */ + LZ4_writeLE16(op, (U16)(ip - match)); + op += 2; + + /* Encode MatchLength */ + { + size_t matchLength = + LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit); + + if (op + ((matchLength + 240) / 255) > oMaxMatch) { + /* Match description too long : reduce it */ + matchLength = (15 - 1) + (oMaxMatch - op) * 255; + } + ip += MINMATCH + matchLength; + + if (matchLength >= ML_MASK) { + *token += ML_MASK; + matchLength -= ML_MASK; + while (matchLength >= 255) { + matchLength -= 255; + *op++ = 255; + } + *op++ = (BYTE)matchLength; + } else + *token += (BYTE)(matchLength); + } + + anchor = ip; + + /* Test end of block */ + if (ip > mflimit) break; + if (op > oMaxSeq) break; + + /* Fill table */ + LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base); + + /* Test next position */ + match = LZ4_getPosition(ip, ctx->hashTable, tableType, base); + LZ4_putPosition(ip, ctx->hashTable, tableType, base); + if ((match + MAX_DISTANCE >= ip) && (LZ4_read32(match) == LZ4_read32(ip))) { + token = op++; + *token = 0; + goto _next_match; + } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + } + +_last_literals: + /* Encode Last Literals */ + { + size_t lastRunSize = (size_t)(iend - anchor); + if (op + 1 /* token */ + ((lastRunSize + 240) / 255) /* litLength */ + + lastRunSize /* literals */ > oend) { + /* adapt lastRunSize to fill 'dst' */ + lastRunSize = (oend - op) - 1; + lastRunSize -= (lastRunSize + 240) / 255; + } + ip = anchor + lastRunSize; + + if (lastRunSize >= RUN_MASK) { + size_t accumulator = lastRunSize - RUN_MASK; + *op++ = RUN_MASK << ML_BITS; + for (; accumulator >= 255; accumulator -= 255) *op++ = 255; + *op++ = (BYTE)accumulator; + } else { + *op++ = (BYTE)(lastRunSize << ML_BITS); + } + memcpy(op, anchor, lastRunSize); + op += lastRunSize; + } + + /* End */ + *srcSizePtr = (int)(((const char*)ip) - src); + return (int)(((char*)op) - dst); +} + +static int LZ4_compress_destSize_extState(LZ4_stream_t* state, const char* src, + char* dst, int* srcSizePtr, + int targetDstSize) { + LZ4_resetStream(state); + + if (targetDstSize >= + LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ + return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, + targetDstSize, 1); + } else { + if (*srcSizePtr < LZ4_64Klimit) + return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, + srcSizePtr, targetDstSize, byU16); + else + return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, + srcSizePtr, targetDstSize, + sizeof(void*) == 8 ? 
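
The *_destSize() variant works in the opposite direction from the other entry points: the destination capacity is fixed and the function reports back how much of the source it managed to encode. A hedged sketch of that calling pattern (illustrative only; assumes the bundled lz4.h is on the include path):

#include "lz4.h"

/* Compress as much of 'src' as fits into a buffer of 'dstCapacity' bytes.
 * On return, *srcConsumed holds the number of input bytes actually encoded;
 * the return value is the number of bytes written to 'dst' (0 on failure). */
int lz4_fill_fixed_buffer(const char *src, int srcSize,
                          char *dst, int dstCapacity, int *srcConsumed) {
  *srcConsumed = srcSize;           /* in: bytes available, out: bytes consumed */
  return LZ4_compress_destSize(src, dst, srcConsumed, dstCapacity);
}
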
byU32 : byPtr); + } +} + +int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, + int targetDstSize) { +#if (LZ4_HEAPMODE) + LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR( + 1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ +#else + LZ4_stream_t ctxBody; + LZ4_stream_t* ctx = &ctxBody; +#endif + + int result = + LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); + +#if (LZ4_HEAPMODE) + FREEMEM(ctx); +#endif + return result; +} + +/*-****************************** +* Streaming functions +********************************/ + +LZ4_stream_t* LZ4_createStream(void) { + LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64); + LZ4_STATIC_ASSERT( + LZ4_STREAMSIZE >= + sizeof(LZ4_stream_t_internal)); /* A compilation error here means + LZ4_STREAMSIZE is not large enough */ + LZ4_resetStream(lz4s); + return lz4s; +} + +void LZ4_resetStream(LZ4_stream_t* LZ4_stream) { + MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); +} + +int LZ4_freeStream(LZ4_stream_t* LZ4_stream) { + if (!LZ4_stream) return 0; /* support free on NULL */ + FREEMEM(LZ4_stream); + return (0); +} + +#define HASH_UNIT sizeof(reg_t) +int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) { + LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; + const BYTE* p = (const BYTE*)dictionary; + const BYTE* const dictEnd = p + dictSize; + const BYTE* base; + + if ((dict->initCheck) || + (dict->currentOffset > + 1 GB)) /* Uninitialized structure, or reuse overflow */ + LZ4_resetStream(LZ4_dict); + + if (dictSize < (int)HASH_UNIT) { + dict->dictionary = NULL; + dict->dictSize = 0; + return 0; + } + + if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; + dict->currentOffset += 64 KB; + base = p - dict->currentOffset; + dict->dictionary = p; + dict->dictSize = (U32)(dictEnd - p); + dict->currentOffset += dict->dictSize; + + while (p <= dictEnd - HASH_UNIT) { + LZ4_putPosition(p, dict->hashTable, byU32, base); + p += 3; + } + + return dict->dictSize; +} + +static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) { + if ((LZ4_dict->currentOffset > 0x80000000) || + ((uptrval)LZ4_dict->currentOffset > + (uptrval)src)) { /* address space overflow */ + /* rescale hash table */ + U32 const delta = LZ4_dict->currentOffset - 64 KB; + const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; + int i; + for (i = 0; i < LZ4_HASH_SIZE_U32; i++) { + if (LZ4_dict->hashTable[i] < delta) + LZ4_dict->hashTable[i] = 0; + else + LZ4_dict->hashTable[i] -= delta; + } + LZ4_dict->currentOffset = 64 KB; + if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; + LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; + } +} + +int LZ4_compress_fast_continue(LZ4_stream_t* LZ4_stream, const char* source, + char* dest, int inputSize, int maxOutputSize, + int acceleration) { + LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse; + const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; + + const BYTE* smallest = (const BYTE*)source; + if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */ + if ((streamPtr->dictSize > 0) && (smallest > dictEnd)) smallest = dictEnd; + LZ4_renormDictT(streamPtr, smallest); + if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; + + /* Check overlapping input/dictionary space */ + { + const BYTE* sourceEnd = (const BYTE*)source + inputSize; + if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) { + streamPtr->dictSize = (U32)(dictEnd - sourceEnd); + if 
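
The streaming compressor in this region is driven one block at a time: LZ4_compress_fast_continue() encodes the next block against up to 64 KB of history, and LZ4_saveDict() preserves that history when the source buffer may move or be reused. A sketch of the per-block loop (illustrative only; 'emit' is a hypothetical callback, and lz4.h is assumed to be on the include path):

#include "lz4.h"

#define EX_BLOCK_SIZE (16 * 1024)

int lz4_stream_blocks(const char *input, int nBlocks,
                      void (*emit)(const char *block, int size)) {
  LZ4_stream_t *stream = LZ4_createStream();
  static char dict[64 * 1024];                         /* history carried between calls */
  char out[EX_BLOCK_SIZE + EX_BLOCK_SIZE / 255 + 16];  /* >= LZ4_compressBound() */
  if (stream == NULL) return -1;

  for (int i = 0; i < nBlocks; i++) {
    const char *block = input + (size_t)i * EX_BLOCK_SIZE;
    int written = LZ4_compress_fast_continue(stream, block, out,
                                             EX_BLOCK_SIZE, (int)sizeof(out), 1);
    if (written <= 0) { LZ4_freeStream(stream); return -1; }
    emit(out, written);
    /* Copy the recent history aside so the next block can still reference it
     * even if 'input' is overwritten by the caller. */
    LZ4_saveDict(stream, dict, (int)sizeof(dict));
  }
  LZ4_freeStream(stream);
  return 0;
}
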
(streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; + if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; + streamPtr->dictionary = dictEnd - streamPtr->dictSize; + } + } + + /* prefix mode : source data follows dictionary */ + if (dictEnd == (const BYTE*)source) { + int result; + if ((streamPtr->dictSize < 64 KB) && + (streamPtr->dictSize < streamPtr->currentOffset)) + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + withPrefix64k, dictSmall, acceleration); + else + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + withPrefix64k, noDictIssue, acceleration); + streamPtr->dictSize += (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } + + /* external dictionary mode */ + { + int result; + if ((streamPtr->dictSize < 64 KB) && + (streamPtr->dictSize < streamPtr->currentOffset)) + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + usingExtDict, dictSmall, acceleration); + else + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, + maxOutputSize, limitedOutput, byU32, + usingExtDict, noDictIssue, acceleration); + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } +} + +/* Hidden debug function, to force external dictionary mode */ +int LZ4_compress_forceExtDict(LZ4_stream_t* LZ4_dict, const char* source, + char* dest, int inputSize) { + LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; + int result; + const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; + + const BYTE* smallest = dictEnd; + if (smallest > (const BYTE*)source) smallest = (const BYTE*)source; + LZ4_renormDictT(streamPtr, smallest); + + result = + LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, + byU32, usingExtDict, noDictIssue, 1); + + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + + return result; +} + +/*! LZ4_saveDict() : + * If previously compressed data block is not guaranteed to remain available at + * its memory location, + * save it into a safer place (char* safeBuffer). + * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_fast_continue(). + * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if + * error. + */ +int LZ4_saveDict(LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) { + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; + const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; + + if ((U32)dictSize > 64 KB) + dictSize = 64 KB; /* useless to define a dictionary > 64 KB */ + if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize; + + memmove(safeBuffer, previousDictEnd - dictSize, dictSize); + + dict->dictionary = (const BYTE*)safeBuffer; + dict->dictSize = (U32)dictSize; + + return dictSize; +} + +/*-***************************** +* Decompression functions +*******************************/ +/*! LZ4_decompress_generic() : + * This generic decompression function covers all use cases. + * It shall be instantiated several times, using different sets of directives. 
+ * Note that it is important for performance that this function really get + * inlined, + * in order to remove useless branches during compilation optimization. + */ +LZ4_FORCE_INLINE int LZ4_decompress_generic( + const char* const src, char* const dst, int srcSize, + int outputSize, /* If endOnInput==endOnInputSize, this value is + `dstCapacity` */ + + int endOnInput, /* endOnOutputSize, endOnInputSize */ + int partialDecoding, /* full, partial */ + int targetOutputSize, /* only used if partialDecoding==partial */ + int dict, /* noDict, withPrefix64k, usingExtDict */ + const BYTE* const lowPrefix, /* == dst when no prefix */ + const BYTE* const dictStart, /* only if dict==usingExtDict */ + const size_t dictSize /* note : = 0 if noDict */ + ) { + const BYTE* ip = (const BYTE*)src; + const BYTE* const iend = ip + srcSize; + + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + outputSize; + BYTE* cpy; + BYTE* oexit = op + targetOutputSize; + + const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize; + const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; + const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; + + const int safeDecode = (endOnInput == endOnInputSize); + const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); + + /* Special cases */ + if ((partialDecoding) && (oexit > oend - MFLIMIT)) + oexit = oend - MFLIMIT; /* targetOutputSize too high => decode everything */ + if ((endOnInput) && (unlikely(outputSize == 0))) + return ((srcSize == 1) && (*ip == 0)) ? 0 : -1; /* Empty output buffer */ + if ((!endOnInput) && (unlikely(outputSize == 0))) return (*ip == 0 ? 1 : -1); + + /* Main Loop : decode sequences */ + while (1) { + size_t length; + const BYTE* match; + size_t offset; + + /* get literal length */ + unsigned const token = *ip++; + if ((length = (token >> ML_BITS)) == RUN_MASK) { + unsigned s; + do { + s = *ip++; + length += s; + } while (likely(endOnInput ? ip < iend - RUN_MASK : 1) & (s == 255)); + if ((safeDecode) && unlikely((uptrval)(op) + length < (uptrval)(op))) + goto _output_error; /* overflow detection */ + if ((safeDecode) && unlikely((uptrval)(ip) + length < (uptrval)(ip))) + goto _output_error; /* overflow detection */ + } + + /* copy literals */ + cpy = op + length; + if (((endOnInput) && ((cpy > (partialDecoding ? 
oexit : oend - MFLIMIT)) || + (ip + length > iend - (2 + 1 + LASTLITERALS)))) || + ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) { + if (partialDecoding) { + if (cpy > oend) + goto _output_error; /* Error : write attempt beyond end of output + buffer */ + if ((endOnInput) && (ip + length > iend)) + goto _output_error; /* Error : read attempt beyond end of input buffer + */ + } else { + if ((!endOnInput) && (cpy != oend)) + goto _output_error; /* Error : block decoding must stop exactly there + */ + if ((endOnInput) && ((ip + length != iend) || (cpy > oend))) + goto _output_error; /* Error : input must be consumed */ + } + memcpy(op, ip, length); + ip += length; + op += length; + break; /* Necessarily EOF, due to parsing restrictions */ + } + LZ4_wildCopy(op, ip, cpy); + ip += length; + op = cpy; + + /* get offset */ + offset = LZ4_readLE16(ip); + ip += 2; + match = op - offset; + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) + goto _output_error; /* Error : offset outside buffers */ + LZ4_write32( + op, + (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */ + + /* get matchlength */ + length = token & ML_MASK; + if (length == ML_MASK) { + unsigned s; + do { + s = *ip++; + if ((endOnInput) && (ip > iend - LASTLITERALS)) goto _output_error; + length += s; + } while (s == 255); + if ((safeDecode) && unlikely((uptrval)(op) + length < (uptrval)op)) + goto _output_error; /* overflow detection */ + } + length += MINMATCH; + + /* check external dictionary */ + if ((dict == usingExtDict) && (match < lowPrefix)) { + if (unlikely(op + length > oend - LASTLITERALS)) + goto _output_error; /* doesn't respect parsing restriction */ + + if (length <= (size_t)(lowPrefix - match)) { + /* match can be copied as a single segment from external dictionary */ + memmove(op, dictEnd - (lowPrefix - match), length); + op += length; + } else { + /* match encompass external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) *op++ = *copyFrom++; + } else { + memcpy(op, lowPrefix, restSize); + op += restSize; + } + } + continue; + } + + /* copy match within block */ + cpy = op + length; + if (unlikely(offset < 8)) { + const int dec64 = dec64table[offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[offset]; + memcpy(op + 4, match, 4); + match -= dec64; + } else { + LZ4_copy8(op, match); + match += 8; + } + op += 8; + + if (unlikely(cpy > oend - 12)) { + BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH - 1); + if (cpy > oend - LASTLITERALS) + goto _output_error; /* Error : last LASTLITERALS bytes must be literals + (uncompressed) */ + if (op < oCopyLimit) { + LZ4_wildCopy(op, match, oCopyLimit); + match += oCopyLimit - op; + op = oCopyLimit; + } + while (op < cpy) *op++ = *match++; + } else { + LZ4_copy8(op, match); + if (length > 16) LZ4_wildCopy(op + 8, match + 8, cpy); + } + op = cpy; /* correction */ + } + + /* end of decoding */ + if (endOnInput) + return (int)(((char*)op) - dst); /* Nb of output bytes decoded */ + else + return (int)(((const char*)ip) - src); /* Nb of input bytes read */ + +/* Overflow error detected */ +_output_error: + return (int)(-(((const char*)ip) - src)) - 1; +} + +int 
LZ4_decompress_safe(const char* source, char* dest, int compressedSize, + int maxDecompressedSize) { + return LZ4_decompress_generic(source, dest, compressedSize, + maxDecompressedSize, endOnInputSize, full, 0, + noDict, (BYTE*)dest, NULL, 0); +} + +int LZ4_decompress_safe_partial(const char* source, char* dest, + int compressedSize, int targetOutputSize, + int maxDecompressedSize) { + return LZ4_decompress_generic(source, dest, compressedSize, + maxDecompressedSize, endOnInputSize, partial, + targetOutputSize, noDict, (BYTE*)dest, NULL, 0); +} + +int LZ4_decompress_fast(const char* source, char* dest, int originalSize) { + return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, + full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), + NULL, 64 KB); +} + +/*===== streaming decompression functions =====*/ + +LZ4_streamDecode_t* LZ4_createStreamDecode(void) { + LZ4_streamDecode_t* lz4s = + (LZ4_streamDecode_t*)ALLOCATOR(1, sizeof(LZ4_streamDecode_t)); + return lz4s; +} + +int LZ4_freeStreamDecode(LZ4_streamDecode_t* LZ4_stream) { + if (!LZ4_stream) return 0; /* support free on NULL */ + FREEMEM(LZ4_stream); + return 0; +} + +/*! + * LZ4_setStreamDecode() : + * Use this function to instruct where to find the dictionary. + * This function is not necessary if previous data is still available where it + * was decoded. + * Loading a size of 0 is allowed (same effect as no dictionary). + * Return : 1 if OK, 0 if error + */ +int LZ4_setStreamDecode(LZ4_streamDecode_t* LZ4_streamDecode, + const char* dictionary, int dictSize) { + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + lz4sd->prefixSize = (size_t)dictSize; + lz4sd->prefixEnd = (const BYTE*)dictionary + dictSize; + lz4sd->externalDict = NULL; + lz4sd->extDictSize = 0; + return 1; +} + +/* +*_continue() : + These decoding functions allow decompression of multiple blocks in +"streaming" mode. + Previously decoded blocks must still be available at the memory position +where they were decoded. 
+ If it's not possible, save the relevant part of decoded data into a safe +buffer, + and indicate where it stands using LZ4_setStreamDecode() +*/ +int LZ4_decompress_safe_continue(LZ4_streamDecode_t* LZ4_streamDecode, + const char* source, char* dest, + int compressedSize, int maxOutputSize) { + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixEnd == (BYTE*)dest) { + result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, usingExtDict, + lz4sd->prefixEnd - lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += result; + lz4sd->prefixEnd += result; + } else { + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_generic( + source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, + usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } + + return result; +} + +int LZ4_decompress_fast_continue(LZ4_streamDecode_t* LZ4_streamDecode, + const char* source, char* dest, + int originalSize) { + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixEnd == (BYTE*)dest) { + result = LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, usingExtDict, + lz4sd->prefixEnd - lz4sd->prefixSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += originalSize; + lz4sd->prefixEnd += originalSize; + } else { + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_generic( + source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, + (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } + + return result; +} + +/* +Advanced decoding functions : +*_usingDict() : + These decoding functions work the same as "_continue" ones, + the dictionary must be explicitly provided within parameters +*/ + +LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic( + const char* source, char* dest, int compressedSize, int maxOutputSize, + int safe, const char* dictStart, int dictSize) { + if (dictSize == 0) + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + safe, full, 0, noDict, (BYTE*)dest, NULL, 0); + if (dictStart + dictSize == dest) { + if (dictSize >= (int)(64 KB - 1)) + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + safe, full, 0, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + safe, full, 0, noDict, (BYTE*)dest - dictSize, + NULL, 0); + } + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + safe, full, 0, usingExtDict, (BYTE*)dest, + (const BYTE*)dictStart, dictSize); +} + +int LZ4_decompress_safe_usingDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const char* dictStart, int dictSize) { + return LZ4_decompress_usingDict_generic( + source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize); +} + +int LZ4_decompress_fast_usingDict(const char* source, char* dest, + int originalSize, const char* dictStart, 
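
The _continue() decoders only require that previously decoded data stay addressable; decoding blocks back-to-back into one contiguous buffer satisfies that automatically. A sketch of that pattern (illustrative only; the array-of-blocks framing is a hypothetical choice made for brevity):

#include "lz4.h"

/* Decode consecutive blocks into one contiguous output region. Because each
 * block lands right after the previous one, back-references of up to 64 KB
 * always remain addressable (prefix mode). Returns total decoded size or -1. */
int lz4_stream_decode_contiguous(const char *const cmpBlocks[], const int cmpSizes[],
                                 int nBlocks, char *out, int outCapacity) {
  LZ4_streamDecode_t *sd = LZ4_createStreamDecode();
  char *op = out;
  if (sd == NULL) return -1;
  LZ4_setStreamDecode(sd, NULL, 0);                /* start with no dictionary */

  for (int i = 0; i < nBlocks; i++) {
    int remaining = outCapacity - (int)(op - out);
    int decSize = LZ4_decompress_safe_continue(sd, cmpBlocks[i], op,
                                               cmpSizes[i], remaining);
    if (decSize < 0) { LZ4_freeStreamDecode(sd); return -1; }
    op += decSize;
  }
  LZ4_freeStreamDecode(sd);
  return (int)(op - out);
}
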
+ int dictSize) { + return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, + dictStart, dictSize); +} + +/* debug function */ +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const char* dictStart, int dictSize) { + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +/*=************************************************* +* Obsolete Functions +***************************************************/ +/* obsolete compression functions */ +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, + int maxOutputSize) { + return LZ4_compress_default(source, dest, inputSize, maxOutputSize); +} +int LZ4_compress(const char* source, char* dest, int inputSize) { + return LZ4_compress_default(source, dest, inputSize, + LZ4_compressBound(inputSize)); +} +int LZ4_compress_limitedOutput_withState(void* state, const char* src, + char* dst, int srcSize, int dstSize) { + return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); +} +int LZ4_compress_withState(void* state, const char* src, char* dst, + int srcSize) { + return LZ4_compress_fast_extState(state, src, dst, srcSize, + LZ4_compressBound(srcSize), 1); +} +int LZ4_compress_limitedOutput_continue(LZ4_stream_t* LZ4_stream, + const char* src, char* dst, int srcSize, + int maxDstSize) { + return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, + 1); +} +int LZ4_compress_continue(LZ4_stream_t* LZ4_stream, const char* source, + char* dest, int inputSize) { + return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, + LZ4_compressBound(inputSize), 1); +} + +/* +These function names are deprecated and should no longer be used. +They are only provided here for compatibility with older user programs. 
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast +- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe +*/ +int LZ4_uncompress(const char* source, char* dest, int outputSize) { + return LZ4_decompress_fast(source, dest, outputSize); +} +int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, + int maxOutputSize) { + return LZ4_decompress_safe(source, dest, isize, maxOutputSize); +} + +/* Obsolete Streaming functions */ + +int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } + +static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base) { + MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t)); + lz4ds->internal_donotuse.bufferStart = base; +} + +int LZ4_resetStreamState(void* state, char* inputBuffer) { + if ((((uptrval)state) & 3) != 0) + return 1; /* Error : pointer is not aligned on 4-bytes boundary */ + LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer); + return 0; +} + +void* LZ4_create(char* inputBuffer) { + LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t)); + LZ4_init(lz4ds, (BYTE*)inputBuffer); + return lz4ds; +} + +char* LZ4_slideInputBuffer(void* LZ4_Data) { + LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse; + int dictSize = + LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB); + return (char*)(ctx->bufferStart + dictSize); +} + +/* Obsolete streaming decompression functions */ + +int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, + int compressedSize, int maxOutputSize) { + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 64 KB); +} + +int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, + int originalSize) { + return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, + full, 0, withPrefix64k, (BYTE*)dest - 64 KB, + NULL, 64 KB); +} + +#endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/src/util/src/shash.c b/src/util/src/shash.c new file mode 100644 index 000000000000..540e66c32fc3 --- /dev/null +++ b/src/util/src/shash.c @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include "shash.h" +#include "tlog.h" + +typedef struct _str_node_t { + char * string; + struct _str_node_t *prev; + struct _str_node_t *next; + char data[]; +} SHashNode; + +typedef struct { + SHashNode **hashList; + uint32_t maxSessions; + uint32_t dataSize; + uint32_t (*hashFp)(void *, char *string); + pthread_mutex_t mutex; +} SHashObj; + +uint32_t taosHashString(void *handle, char *string) { + SHashObj *pObj = (SHashObj *)handle; + uint32_t hash = 0, hashv; + char * c; + + c = string; + while (*c) { + hash += *((int *)c); + c += 4; + } + + hashv = hash / pObj->maxSessions; + hash = (hashv + hash % pObj->maxSessions) % pObj->maxSessions; + + return hash; +} + +uint32_t taosHashStringStep1(void *handle, char *string) { + SHashObj *pObj = (SHashObj *)handle; + uint32_t hash = 0, hashv; + char * c; + + c = string; + while (*c) { + hash += *c; + c++; + } + + hashv = hash / pObj->maxSessions; + hash = (hashv + hash % pObj->maxSessions) % pObj->maxSessions; + + return hash; +} + +void *taosAddStrHashWithSize(void *handle, char *string, char *pData, int dataSize) { + uint32_t hash; + SHashNode *pNode; + SHashObj * pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + if (string == NULL || string[0] == 0) return NULL; + + hash = (*pObj->hashFp)(pObj, string); + + pthread_mutex_lock(&pObj->mutex); + + pNode = (SHashNode *)malloc(sizeof(SHashNode) + (size_t)dataSize + strlen(string) + 1); + memcpy(pNode->data, pData, (size_t)dataSize); + pNode->prev = 0; + pNode->next = pObj->hashList[hash]; + pNode->string = pNode->data + dataSize; + strcpy(pNode->string, string); + + if (pObj->hashList[hash] != 0) (pObj->hashList[hash])->prev = pNode; + pObj->hashList[hash] = pNode; + + pthread_mutex_unlock(&pObj->mutex); + + pTrace("hash:%d:%s is added", hash, string); + + return pNode->data; +} + +void *taosAddStrHash(void *handle, char *string, char *pData) { + if (string == NULL || string[0] == 0) return NULL; + + SHashObj *pObj = (SHashObj *)handle; + return taosAddStrHashWithSize(handle, string, pData, pObj->dataSize); +} + +void taosDeleteStrHashNode(void *handle, char *string, void *pDeleteNode) { + uint32_t hash; + SHashNode *pNode; + SHashObj * pObj; + bool find = false; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + if (string == NULL || string[0] == 0) return; + + hash = (*(pObj->hashFp))(pObj, string); + + pthread_mutex_lock(&pObj->mutex); + + pNode = pObj->hashList[hash]; + + while (pNode) { + if (strcmp(pNode->string, string) != 0) continue; + if (pNode->data == pDeleteNode) { + find = true; + break; + } + + pNode = pNode->next; + } + + if (find && pNode) { + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pObj->hashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + pTrace("hash:%d:%s:%p is removed", hash, string, pNode); + + free(pNode); + } + + pthread_mutex_unlock(&pObj->mutex); +} + +void taosDeleteStrHash(void *handle, char *string) { + uint32_t hash; + SHashNode *pNode; + SHashObj * pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + if (string == NULL || string[0] == 0) return; + + hash = (*(pObj->hashFp))(pObj, string); + + pthread_mutex_lock(&pObj->mutex); + + pNode = pObj->hashList[hash]; + while (pNode) { + if (strcmp(pNode->string, string) == 0) break; + + pNode = pNode->next; + } + + if (pNode) { + if (pNode->prev) { + pNode->prev->next = 
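
The string-hash API in this file is keyed by NUL-terminated strings and copies a fixed-size payload into each node. A small usage sketch (illustrative only; STableRef is a hypothetical payload type, and one of the hash functions defined above is supplied at init time):

#include <stdint.h>
#include "shash.h"

typedef struct { int64_t uid; int32_t vgId; } STableRef;   /* hypothetical payload */

void shash_usage_example(void) {
  /* 256 buckets; every entry stores a copy of an STableRef. */
  void *handle = taosInitStrHash(256, sizeof(STableRef), taosHashStringStep1);
  if (handle == NULL) return;

  STableRef ref = { .uid = 1001, .vgId = 3 };
  taosAddStrHash(handle, "db.meters", (char *)&ref);        /* payload is copied */

  STableRef *found = (STableRef *)taosGetStrHashData(handle, "db.meters");
  if (found != NULL) {
    /* found->uid == 1001, found->vgId == 3 */
  }

  taosDeleteStrHash(handle, "db.meters");
  taosCleanUpStrHash(handle);
}
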
pNode->next; + } else { + pObj->hashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + pTrace("hash:%d:%s:%p is removed", hash, string, pNode); + + free(pNode); + } + + pthread_mutex_unlock(&pObj->mutex); +} + +void *taosGetStrHashData(void *handle, char *string) { + uint32_t hash; + SHashNode *pNode; + SHashObj * pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + if (string == NULL || string[0] == 0) return NULL; + + hash = (*pObj->hashFp)(pObj, string); + + pthread_mutex_lock(&pObj->mutex); + + pNode = pObj->hashList[hash]; + + while (pNode) { + if (strcmp(pNode->string, string) == 0) { + pTrace("hash:%d:%s is retrieved", hash, string); + break; + } + + pNode = pNode->next; + } + + pthread_mutex_unlock(&pObj->mutex); + + if (pNode) return pNode->data; + + return NULL; +} + +void *taosInitStrHash(uint32_t maxSessions, uint32_t dataSize, uint32_t (*fp)(void *, char *)) { + SHashObj *pObj; + + pObj = (SHashObj *)malloc(sizeof(SHashObj)); + if (pObj == NULL) { + return NULL; + } + + memset(pObj, 0, sizeof(SHashObj)); + pObj->maxSessions = maxSessions; + pObj->dataSize = dataSize; + pObj->hashFp = fp; + + pObj->hashList = (SHashNode **)malloc(sizeof(SHashNode *) * (size_t)maxSessions); + if (pObj->hashList == NULL) { + free(pObj); + return NULL; + } + memset(pObj->hashList, 0, sizeof(SHashNode *) * (size_t)maxSessions); + + pthread_mutex_init(&pObj->mutex, NULL); + + return pObj; +} + +void taosCleanUpStrHashWithFp(void *handle, void (*fp)(char *)) { + SHashObj * pObj; + SHashNode *pNode, *pNext; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions <= 0) return; + + pthread_mutex_lock(&pObj->mutex); + + if (pObj->hashList) { + for (int i = 0; i < pObj->maxSessions; ++i) { + pNode = pObj->hashList[i]; + while (pNode) { + pNext = pNode->next; + if (fp != NULL) fp(pNode->data); + free(pNode); + pNode = pNext; + } + } + + free(pObj->hashList); + } + + pthread_mutex_unlock(&pObj->mutex); + + pthread_mutex_destroy(&pObj->mutex); + + memset(pObj, 0, sizeof(SHashObj)); + free(pObj); +} + +void taosCleanUpStrHash(void *handle) { taosCleanUpStrHashWithFp(handle, NULL); } + +char *taosVisitStrHashWithFp(void *handle, int (*fp)(char *)) { + SHashObj * pObj; + SHashNode *pNode, *pNext; + char * pData = NULL; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions <= 0) return NULL; + + pthread_mutex_lock(&pObj->mutex); + + if (pObj->hashList) { + for (int i = 0; i < pObj->maxSessions; ++i) { + pNode = pObj->hashList[i]; + while (pNode) { + pNext = pNode->next; + int flag = fp(pNode->data); + if (flag) { + pData = pNode->data; + goto VisitEnd; + } + + pNode = pNext; + } + } + } + +VisitEnd: + + pthread_mutex_unlock(&pObj->mutex); + return pData; +} diff --git a/src/util/src/sql.c b/src/util/src/sql.c new file mode 100644 index 000000000000..eca64a274751 --- /dev/null +++ b/src/util/src/sql.c @@ -0,0 +1,2402 @@ +/* +** 2000-05-29 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Driver template for the LEMON parser generator. +** +** The "lemon" program processes an LALR(1) input grammar file, then uses +** this template to construct a parser. 
The "lemon" program inserts text +** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the +** interstitial "-" characters) contained in this template is changed into +** the value of the %name directive from the grammar. Otherwise, the content +** of this template is copied straight through into the generate parser +** source file. +** +** The following is the concatenation of all %include directives from the +** input grammar file: +*/ +#include +/************ Begin %include sections from the grammar ************************/ + +#include +#include +#include +#include +#include + +#include "tsql.h" +#include "tutil.h" +/**************** End of %include directives **********************************/ +/* These constants specify the various numeric values for terminal symbols +** in a format understandable to "makeheaders". This section is blank unless +** "lemon" is run with the "-m" command-line option. +***************** Begin makeheaders token definitions *************************/ +/**************** End makeheaders token definitions ***************************/ + +/* The next sections is a series of control #defines. +** various aspects of the generated parser. +** YYCODETYPE is the data type used to store the integer codes +** that represent terminal and non-terminal symbols. +** "unsigned char" is used if there are fewer than +** 256 symbols. Larger types otherwise. +** YYNOCODE is a number of type YYCODETYPE that is not used for +** any terminal or nonterminal symbol. +** YYFALLBACK If defined, this indicates that one or more tokens +** (also known as: "terminal symbols") have fall-back +** values which should be used if the original symbol +** would not parse. This permits keywords to sometimes +** be used as identifiers, for example. +** YYACTIONTYPE is the data type used for "action codes" - numbers +** that indicate what to do in response to the next +** token. +** ParseTOKENTYPE is the data type used for minor type for terminal +** symbols. Background: A "minor type" is a semantic +** value associated with a terminal or non-terminal +** symbols. For example, for an "ID" terminal symbol, +** the minor type might be the name of the identifier. +** Each non-terminal can have a different minor type. +** Terminal symbols all have the same minor type, though. +** This macros defines the minor type for terminal +** symbols. +** YYMINORTYPE is the data type used for all minor types. +** This is typically a union of many types, one of +** which is ParseTOKENTYPE. The entry in the union +** for terminal symbols is called "yy0". +** YYSTACKDEPTH is the maximum depth of the parser's stack. If +** zero the stack is dynamically sized using realloc() +** ParseARG_SDECL A static variable declaration for the %extra_argument +** ParseARG_PDECL A parameter declaration for the %extra_argument +** ParseARG_STORE Code to store %extra_argument into yypParser +** ParseARG_FETCH Code to extract %extra_argument from yypParser +** YYERRORSYMBOL is the code number of the error symbol. If not +** defined, then do no error processing. +** YYNSTATE the combined number of states. 
+** YYNRULE the number of rules in the grammar +** YY_MAX_SHIFT Maximum value for shift actions +** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions +** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions +** YY_MIN_REDUCE Maximum value for reduce actions +** YY_ERROR_ACTION The yy_action[] code for syntax error +** YY_ACCEPT_ACTION The yy_action[] code for accept +** YY_NO_ACTION The yy_action[] code for no-op +*/ +#ifndef INTERFACE +# define INTERFACE 1 +#endif +/************* Begin control #defines *****************************************/ +#define YYCODETYPE unsigned char +#define YYNOCODE 241 +#define YYACTIONTYPE unsigned short int +#define ParseTOKENTYPE SSQLToken +typedef union { + int yyinit; + ParseTOKENTYPE yy0; + SQuerySQL* yy24; + SCreateDBSQL yy54; + tSQLExprList* yy98; + tFieldList* yy151; + tVariantList* yy216; + tVariant yy266; + SCreateTableSQL* yy278; + SLimitVal yy294; + TAOS_FIELD yy343; + tSQLExpr* yy370; + int yy412; + tSQLExprListList* yy434; +} YYMINORTYPE; +#ifndef YYSTACKDEPTH +#define YYSTACKDEPTH 100 +#endif +#define ParseARG_SDECL SSqlInfo* pInfo; +#define ParseARG_PDECL ,SSqlInfo* pInfo +#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo +#define ParseARG_STORE yypParser->pInfo = pInfo +#define YYFALLBACK 1 +#define YYNSTATE 227 +#define YYNRULE 180 +#define YY_MAX_SHIFT 226 +#define YY_MIN_SHIFTREDUCE 347 +#define YY_MAX_SHIFTREDUCE 526 +#define YY_MIN_REDUCE 527 +#define YY_MAX_REDUCE 706 +#define YY_ERROR_ACTION 707 +#define YY_ACCEPT_ACTION 708 +#define YY_NO_ACTION 709 +/************* End control #defines *******************************************/ + +/* The yyzerominor constant is used to initialize instances of +** YYMINORTYPE objects to zero. */ +static const YYMINORTYPE yyzerominor = { 0 }; + +/* Define the yytestcase() macro to be a no-op if is not already defined +** otherwise. +** +** Applications can choose to define yytestcase() in the %include section +** to a macro that can assist in verifying code coverage. For production +** code the yytestcase() macro should be turned off. But it is useful +** for testing. +*/ +#ifndef yytestcase +# define yytestcase(X) +#endif + + +/* Next are the tables used to determine what action to take based on the +** current state and lookahead token. These tables are used to implement +** functions that take a state number and lookahead value and return an +** action integer. +** +** Suppose the action integer is N. Then the action is determined as +** follows +** +** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead +** token onto the stack and goto state N. +** +** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then +** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE. +** +** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE +** and YY_MAX_REDUCE + +** N == YY_ERROR_ACTION A syntax error has occurred. +** +** N == YY_ACCEPT_ACTION The parser accepts its input. +** +** N == YY_NO_ACTION No such action. Denotes unused +** slots in the yy_action[] table. +** +** The action table is constructed as a single large table named yy_action[]. +** Given state S and lookahead X, the action is computed as +** +** yy_action[ yy_shift_ofst[S] + X ] +** +** If the index value yy_shift_ofst[S]+X is out of range or if the value +** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S] +** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table +** and that yy_default[S] should be used instead. 
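
The lookup rule described above reduces to a few lines of code. A sketch using the tables and constants defined below (illustrative only; the generated parser's own lookup routines additionally handle fallback tokens and the reduce-offset table):

/* action for (state S, lookahead X), per the formula described above */
static int lookup_shift_action(int stateno, YYCODETYPE lookahead) {
  int ofst = yy_shift_ofst[stateno];
  if (ofst == YY_SHIFT_USE_DFLT) return yy_default[stateno];
  int idx = ofst + (int)lookahead;
  if (idx < 0 || idx >= YY_ACTTAB_COUNT || yy_lookahead[idx] != lookahead)
    return yy_default[stateno];
  return yy_action[idx];
}
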
+** +** The formula above is for computing the action when the lookahead is +** a terminal symbol. If the lookahead is a non-terminal (as occurs after +** a reduce action) then the yy_reduce_ofst[] array is used in place of +** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of +** YY_SHIFT_USE_DFLT. +** +** The following are the tables generated in this section: +** +** yy_action[] A single table containing all actions. +** yy_lookahead[] A table containing the lookahead for each entry in +** yy_action. Used to detect hash collisions. +** yy_shift_ofst[] For each state, the offset into yy_action for +** shifting terminals. +** yy_reduce_ofst[] For each state, the offset into yy_action for +** shifting non-terminals after a reduce. +** yy_default[] Default action for each state. +** +*********** Begin parsing tables **********************************************/ +#define YY_ACTTAB_COUNT (472) +static const YYACTIONTYPE yy_action[] = { + /* 0 */ 379, 32, 31, 708, 226, 30, 29, 28, 380, 69, + /* 10 */ 70, 76, 39, 41, 513, 33, 34, 221, 25, 71, + /* 20 */ 27, 19, 124, 193, 37, 35, 38, 36, 220, 435, + /* 30 */ 150, 509, 32, 31, 142, 225, 30, 29, 28, 39, + /* 40 */ 41, 431, 33, 34, 428, 423, 429, 27, 430, 124, + /* 50 */ 193, 37, 35, 38, 36, 443, 458, 149, 509, 32, + /* 60 */ 31, 19, 124, 30, 29, 28, 39, 41, 160, 33, + /* 70 */ 34, 508, 9, 159, 27, 415, 379, 193, 37, 35, + /* 80 */ 38, 36, 458, 157, 380, 424, 32, 31, 379, 53, + /* 90 */ 30, 29, 28, 426, 439, 41, 380, 33, 34, 465, + /* 100 */ 45, 188, 27, 216, 215, 193, 37, 35, 38, 36, + /* 110 */ 102, 114, 61, 478, 32, 31, 121, 46, 30, 29, + /* 120 */ 28, 19, 19, 190, 161, 55, 123, 213, 64, 19, + /* 130 */ 147, 432, 348, 349, 350, 351, 352, 353, 354, 355, + /* 140 */ 356, 357, 358, 158, 214, 424, 424, 458, 427, 33, + /* 150 */ 34, 219, 6, 424, 27, 58, 99, 193, 37, 35, + /* 160 */ 38, 36, 30, 29, 28, 122, 32, 31, 128, 122, + /* 170 */ 30, 29, 28, 153, 490, 505, 143, 481, 10, 484, + /* 180 */ 144, 487, 504, 146, 490, 458, 167, 481, 218, 484, + /* 190 */ 464, 487, 503, 175, 141, 172, 459, 460, 78, 77, + /* 200 */ 135, 119, 117, 79, 156, 151, 152, 425, 140, 192, + /* 210 */ 224, 223, 367, 208, 138, 151, 152, 153, 490, 440, + /* 220 */ 480, 481, 427, 484, 139, 487, 385, 23, 56, 113, + /* 230 */ 378, 427, 514, 57, 437, 412, 25, 54, 450, 451, + /* 240 */ 145, 129, 43, 14, 24, 181, 194, 130, 177, 151, + /* 250 */ 152, 37, 35, 38, 36, 137, 441, 507, 40, 32, + /* 260 */ 31, 13, 491, 30, 29, 28, 13, 489, 40, 483, + /* 270 */ 1, 486, 482, 131, 485, 421, 1, 489, 48, 43, + /* 280 */ 132, 420, 488, 198, 433, 20, 434, 20, 68, 67, + /* 290 */ 8, 7, 488, 49, 75, 74, 133, 134, 523, 475, + /* 300 */ 126, 120, 40, 474, 127, 125, 154, 471, 470, 155, + /* 310 */ 217, 489, 418, 442, 87, 178, 457, 100, 456, 98, + /* 320 */ 101, 386, 199, 180, 81, 409, 488, 21, 212, 522, + /* 330 */ 446, 65, 521, 182, 519, 186, 115, 22, 377, 376, + /* 340 */ 72, 50, 436, 90, 374, 373, 162, 116, 371, 370, + /* 350 */ 369, 362, 118, 366, 364, 47, 85, 445, 42, 191, + /* 360 */ 91, 189, 183, 187, 26, 23, 185, 211, 196, 62, + /* 370 */ 200, 51, 201, 202, 203, 59, 206, 204, 205, 207, + /* 380 */ 16, 209, 222, 526, 163, 111, 63, 109, 105, 94, + /* 390 */ 92, 93, 422, 411, 95, 96, 97, 108, 103, 104, + /* 400 */ 110, 106, 107, 112, 136, 372, 164, 80, 368, 166, + /* 410 */ 82, 165, 525, 168, 169, 170, 171, 524, 174, 517, + /* 420 */ 11, 176, 173, 12, 179, 86, 148, 17, 447, 88, + /* 430 */ 184, 3, 452, 89, 480, 4, 60, 492, 2, 15, + /* 440 */ 18, 5, 195, 407, 197, 405, 403, 401, 
399, 397, + /* 450 */ 395, 393, 43, 383, 392, 44, 66, 20, 417, 210, + /* 460 */ 416, 414, 52, 390, 73, 381, 360, 527, 83, 529, + /* 470 */ 529, 84, +}; +static const YYCODETYPE yy_lookahead[] = { + /* 0 */ 1, 33, 34, 187, 188, 37, 38, 39, 9, 61, + /* 10 */ 62, 63, 13, 14, 87, 16, 17, 69, 91, 71, + /* 20 */ 21, 190, 228, 24, 25, 26, 27, 28, 76, 213, + /* 30 */ 236, 237, 33, 34, 189, 190, 37, 38, 39, 13, + /* 40 */ 14, 2, 16, 17, 5, 214, 7, 21, 9, 228, + /* 50 */ 24, 25, 26, 27, 28, 190, 190, 236, 237, 33, + /* 60 */ 34, 190, 228, 37, 38, 39, 13, 14, 60, 16, + /* 70 */ 17, 237, 228, 34, 21, 5, 1, 24, 25, 26, + /* 80 */ 27, 28, 190, 212, 9, 214, 33, 34, 1, 90, + /* 90 */ 37, 38, 39, 215, 229, 14, 9, 16, 17, 233, + /* 100 */ 91, 235, 21, 33, 34, 24, 25, 26, 27, 28, + /* 110 */ 61, 62, 63, 87, 33, 34, 228, 108, 37, 38, + /* 120 */ 39, 190, 190, 231, 116, 233, 228, 119, 120, 190, + /* 130 */ 197, 92, 45, 46, 47, 48, 49, 50, 51, 52, + /* 140 */ 53, 54, 55, 212, 212, 214, 214, 190, 215, 16, + /* 150 */ 17, 212, 86, 214, 21, 89, 90, 24, 25, 26, + /* 160 */ 27, 28, 37, 38, 39, 228, 33, 34, 228, 228, + /* 170 */ 37, 38, 39, 1, 2, 228, 239, 5, 44, 7, + /* 180 */ 239, 9, 228, 1, 2, 190, 115, 5, 190, 7, + /* 190 */ 233, 9, 228, 122, 60, 124, 101, 102, 64, 65, + /* 200 */ 66, 61, 62, 63, 197, 33, 34, 209, 74, 37, + /* 210 */ 57, 58, 59, 197, 228, 33, 34, 1, 2, 37, + /* 220 */ 1, 5, 215, 7, 228, 9, 195, 93, 233, 198, + /* 230 */ 193, 215, 87, 216, 91, 198, 91, 94, 87, 87, + /* 240 */ 56, 228, 91, 91, 227, 111, 56, 228, 114, 33, + /* 250 */ 34, 25, 26, 27, 28, 121, 37, 87, 86, 33, + /* 260 */ 34, 91, 87, 37, 38, 39, 91, 95, 86, 5, + /* 270 */ 86, 7, 5, 228, 7, 87, 86, 95, 91, 91, + /* 280 */ 228, 87, 110, 87, 5, 91, 7, 91, 117, 118, + /* 290 */ 117, 118, 110, 106, 67, 68, 228, 228, 215, 210, + /* 300 */ 228, 228, 86, 210, 228, 228, 210, 210, 210, 210, + /* 310 */ 210, 95, 211, 190, 190, 113, 234, 190, 234, 217, + /* 320 */ 190, 190, 190, 238, 56, 190, 110, 190, 190, 190, + /* 330 */ 95, 190, 190, 230, 190, 230, 190, 190, 190, 190, + /* 340 */ 190, 105, 226, 225, 190, 190, 190, 190, 190, 190, + /* 350 */ 190, 190, 190, 190, 190, 107, 191, 191, 104, 99, + /* 360 */ 224, 103, 96, 98, 109, 93, 97, 72, 191, 85, + /* 370 */ 84, 191, 83, 82, 57, 194, 79, 81, 80, 78, + /* 380 */ 75, 77, 72, 5, 123, 196, 194, 201, 205, 221, + /* 390 */ 223, 222, 213, 208, 220, 219, 218, 202, 207, 206, + /* 400 */ 200, 204, 203, 199, 191, 191, 5, 192, 191, 70, + /* 410 */ 192, 123, 5, 123, 5, 123, 70, 5, 70, 79, + /* 420 */ 86, 115, 123, 86, 113, 112, 1, 91, 87, 86, + /* 430 */ 86, 100, 87, 86, 1, 100, 67, 87, 86, 86, + /* 440 */ 91, 86, 88, 5, 88, 5, 5, 5, 5, 1, + /* 450 */ 5, 5, 91, 73, 5, 16, 118, 91, 5, 15, + /* 460 */ 5, 87, 86, 5, 70, 73, 56, 0, 21, 240, + /* 470 */ 240, 21, +}; +#define YY_SHIFT_USE_DFLT (-74) +#define YY_SHIFT_COUNT (226) +#define YY_SHIFT_MIN (-73) +#define YY_SHIFT_MAX (467) +static const short yy_shift_ofst[] = { + /* 0 */ 134, 172, 216, 75, 75, 75, 75, 75, 75, -1, + /* 10 */ 87, 216, 216, 216, 39, 39, 39, 75, 75, 75, + /* 20 */ 75, -48, -48, -74, 182, 216, 216, 216, 216, 216, + /* 30 */ 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, + /* 40 */ 216, 216, 216, 39, 39, 70, 70, 70, 70, 70, + /* 50 */ 70, 66, 70, 75, 75, 95, 95, 143, 75, 75, + /* 60 */ 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, + /* 70 */ 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, + /* 80 */ 75, 75, 75, 75, 75, 202, 268, 268, 235, 235, + /* 90 */ 236, 248, 254, 260, 258, 265, 269, 266, 255, 272, + /* 100 */ 268, 268, 295, 284, 286, 289, 291, 
317, 296, 298, + /* 110 */ 297, 301, 305, 304, 295, 268, 268, 310, 268, 310, + /* 120 */ 26, 53, 53, 53, 53, 53, 81, 133, 226, 226, + /* 130 */ 226, -32, -32, -32, -32, -52, 8, 71, 125, 125, + /* 140 */ 49, 140, 153, -73, 145, 219, 184, 151, 152, 170, + /* 150 */ 175, 264, 267, 190, 9, 187, 188, 194, 196, 279, + /* 160 */ 171, 173, 227, 378, 261, 401, 288, 339, 407, 290, + /* 170 */ 409, 292, 346, 412, 299, 348, 340, 306, 334, 337, + /* 180 */ 311, 313, 341, 343, 425, 344, 345, 347, 336, 331, + /* 190 */ 349, 335, 350, 352, 433, 353, 354, 355, 356, 369, + /* 200 */ 438, 440, 441, 442, 443, 448, 445, 446, 361, 449, + /* 210 */ 380, 444, 439, 338, 366, 453, 455, 374, 376, 366, + /* 220 */ 458, 394, 392, 447, 450, 410, 467, +}; +#define YY_REDUCE_USE_DFLT (-207) +#define YY_REDUCE_COUNT (119) +#define YY_REDUCE_MIN (-206) +#define YY_REDUCE_MAX (218) +static const short yy_reduce_ofst[] = { + /* 0 */ -184, -206, -179, -134, -108, -129, -69, -68, -61, -135, + /* 10 */ -155, -63, -59, -166, -67, 7, 16, -43, -5, -2, + /* 20 */ -169, 31, 37, 17, -156, -112, -102, -60, -53, -46, + /* 30 */ -36, -14, -4, 13, 19, 45, 52, 68, 69, 72, + /* 40 */ 73, 76, 77, -122, 83, 89, 93, 96, 97, 98, + /* 50 */ 99, 101, 100, 123, 124, 82, 84, 102, 127, 130, + /* 60 */ 131, 132, 135, 137, 138, 139, 141, 142, 144, 146, + /* 70 */ 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, + /* 80 */ 160, 161, 162, 163, 164, 85, 165, 166, 103, 105, + /* 90 */ 116, 118, 136, 167, 169, 168, 174, 176, 178, 179, + /* 100 */ 177, 180, 181, 185, 191, 193, 183, 197, 199, 195, + /* 110 */ 186, 200, 189, 204, 192, 213, 214, 215, 217, 218, +}; +static const YYACTIONTYPE yy_default[] = { + /* 0 */ 707, 691, 691, 707, 707, 707, 707, 707, 707, 624, + /* 10 */ 539, 707, 707, 691, 707, 707, 707, 707, 707, 707, + /* 20 */ 707, 569, 569, 618, 707, 707, 707, 707, 707, 707, + /* 30 */ 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, + /* 40 */ 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, + /* 50 */ 707, 707, 707, 707, 707, 641, 641, 707, 707, 707, + /* 60 */ 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, + /* 70 */ 707, 707, 555, 707, 707, 707, 707, 707, 707, 707, + /* 80 */ 707, 707, 707, 707, 707, 707, 541, 541, 707, 707, + /* 90 */ 648, 652, 646, 634, 642, 633, 629, 628, 656, 707, + /* 100 */ 541, 541, 564, 590, 588, 586, 584, 582, 580, 578, + /* 110 */ 576, 574, 567, 571, 564, 541, 541, 562, 541, 562, + /* 120 */ 707, 695, 696, 657, 690, 647, 674, 673, 686, 680, + /* 130 */ 679, 678, 677, 676, 675, 707, 707, 707, 682, 681, + /* 140 */ 707, 707, 707, 707, 707, 707, 659, 707, 707, 707, + /* 150 */ 707, 707, 707, 659, 653, 649, 707, 707, 707, 707, + /* 160 */ 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, + /* 170 */ 707, 707, 707, 707, 707, 707, 707, 707, 707, 707, + /* 180 */ 692, 707, 707, 707, 707, 707, 707, 707, 643, 707, + /* 190 */ 635, 707, 707, 707, 707, 707, 707, 707, 599, 707, + /* 200 */ 707, 707, 707, 707, 707, 707, 707, 707, 568, 707, + /* 210 */ 707, 707, 707, 707, 700, 707, 707, 707, 593, 698, + /* 220 */ 707, 707, 707, 545, 543, 707, 707, +}; +/********** End of lemon-generated parsing tables *****************************/ + +/* The next table maps tokens (terminal symbols) into fallback tokens. +** If a construct like the following: +** +** %fallback ID X Y Z. +** +** appears in the grammar, then ID becomes a fallback token for X, Y, +** and Z. 
Whenever one of the tokens X, Y, or Z is input to the parser +** but it does not parse, the type of the token is changed to ID and +** the parse is retried before an error is thrown. +** +** This feature can be used, for example, to cause some keywords in a language +** to revert to identifiers if they keyword does not apply in the context where +** it appears. +*/ +#ifdef YYFALLBACK +static const YYCODETYPE yyFallback[] = { + 0, /* $ => nothing */ + 0, /* ID => nothing */ + 1, /* BOOL => ID */ + 1, /* TINYINT => ID */ + 1, /* SMALLINT => ID */ + 1, /* INTEGER => ID */ + 1, /* BIGINT => ID */ + 1, /* FLOAT => ID */ + 1, /* DOUBLE => ID */ + 1, /* STRING => ID */ + 1, /* TIMESTAMP => ID */ + 1, /* BINARY => ID */ + 1, /* NCHAR => ID */ + 0, /* OR => nothing */ + 0, /* AND => nothing */ + 0, /* NOT => nothing */ + 0, /* EQ => nothing */ + 0, /* NE => nothing */ + 0, /* ISNULL => nothing */ + 0, /* NOTNULL => nothing */ + 0, /* IS => nothing */ + 1, /* LIKE => ID */ + 1, /* GLOB => ID */ + 0, /* BETWEEN => nothing */ + 0, /* IN => nothing */ + 0, /* GT => nothing */ + 0, /* GE => nothing */ + 0, /* LT => nothing */ + 0, /* LE => nothing */ + 0, /* BITAND => nothing */ + 0, /* BITOR => nothing */ + 0, /* LSHIFT => nothing */ + 0, /* RSHIFT => nothing */ + 0, /* PLUS => nothing */ + 0, /* MINUS => nothing */ + 0, /* DIVIDE => nothing */ + 0, /* TIMES => nothing */ + 0, /* STAR => nothing */ + 0, /* SLASH => nothing */ + 0, /* REM => nothing */ + 0, /* CONCAT => nothing */ + 0, /* UMINUS => nothing */ + 0, /* UPLUS => nothing */ + 0, /* BITNOT => nothing */ + 0, /* SHOW => nothing */ + 0, /* DATABASES => nothing */ + 0, /* MNODES => nothing */ + 0, /* DNODES => nothing */ + 0, /* USERS => nothing */ + 0, /* MODULES => nothing */ + 0, /* QUERIES => nothing */ + 0, /* CONNECTIONS => nothing */ + 0, /* STREAMS => nothing */ + 0, /* CONFIGS => nothing */ + 0, /* SCORES => nothing */ + 0, /* GRANTS => nothing */ + 0, /* DOT => nothing */ + 0, /* TABLES => nothing */ + 0, /* STABLES => nothing */ + 0, /* VGROUPS => nothing */ + 0, /* DROP => nothing */ + 0, /* TABLE => nothing */ + 1, /* DATABASE => ID */ + 0, /* USER => nothing */ + 0, /* USE => nothing */ + 0, /* DESCRIBE => nothing */ + 0, /* ALTER => nothing */ + 0, /* PASS => nothing */ + 0, /* PRIVILEGE => nothing */ + 0, /* DNODE => nothing */ + 1, /* IP => ID */ + 0, /* LOCAL => nothing */ + 0, /* IF => nothing */ + 0, /* EXISTS => nothing */ + 0, /* CREATE => nothing */ + 0, /* KEEP => nothing */ + 0, /* REPLICA => nothing */ + 0, /* DAYS => nothing */ + 0, /* ROWS => nothing */ + 0, /* CACHE => nothing */ + 0, /* ABLOCKS => nothing */ + 0, /* TBLOCKS => nothing */ + 0, /* CTIME => nothing */ + 0, /* CLOG => nothing */ + 0, /* COMP => nothing */ + 0, /* PRECISION => nothing */ + 0, /* LP => nothing */ + 0, /* RP => nothing */ + 0, /* TAGS => nothing */ + 0, /* USING => nothing */ + 0, /* AS => nothing */ + 0, /* COMMA => nothing */ + 0, /* NULL => nothing */ + 0, /* SELECT => nothing */ + 0, /* FROM => nothing */ + 0, /* VARIABLE => nothing */ + 0, /* INTERVAL => nothing */ + 0, /* FILL => nothing */ + 0, /* SLIDING => nothing */ + 0, /* ORDER => nothing */ + 0, /* BY => nothing */ + 1, /* ASC => ID */ + 1, /* DESC => ID */ + 0, /* GROUP => nothing */ + 0, /* HAVING => nothing */ + 0, /* LIMIT => nothing */ + 1, /* OFFSET => ID */ + 0, /* SLIMIT => nothing */ + 0, /* SOFFSET => nothing */ + 0, /* WHERE => nothing */ + 1, /* NOW => ID */ + 0, /* INSERT => nothing */ + 0, /* INTO => nothing */ + 0, /* VALUES => nothing */ + 0, /* RESET => 
nothing */ + 0, /* QUERY => nothing */ + 0, /* ADD => nothing */ + 0, /* COLUMN => nothing */ + 0, /* TAG => nothing */ + 0, /* CHANGE => nothing */ + 0, /* SET => nothing */ + 0, /* KILL => nothing */ + 0, /* CONNECTION => nothing */ + 0, /* COLON => nothing */ + 0, /* STREAM => nothing */ + 1, /* ABORT => ID */ + 1, /* AFTER => ID */ + 1, /* ATTACH => ID */ + 1, /* BEFORE => ID */ + 1, /* BEGIN => ID */ + 1, /* CASCADE => ID */ + 1, /* CLUSTER => ID */ + 1, /* CONFLICT => ID */ + 1, /* COPY => ID */ + 1, /* DEFERRED => ID */ + 1, /* DELIMITERS => ID */ + 1, /* DETACH => ID */ + 1, /* EACH => ID */ + 1, /* END => ID */ + 1, /* EXPLAIN => ID */ + 1, /* FAIL => ID */ + 1, /* FOR => ID */ + 1, /* IGNORE => ID */ + 1, /* IMMEDIATE => ID */ + 1, /* INITIALLY => ID */ + 1, /* INSTEAD => ID */ + 1, /* MATCH => ID */ + 1, /* KEY => ID */ + 1, /* OF => ID */ + 1, /* RAISE => ID */ + 1, /* REPLACE => ID */ + 1, /* RESTRICT => ID */ + 1, /* ROW => ID */ + 1, /* STATEMENT => ID */ + 1, /* TRIGGER => ID */ + 1, /* VIEW => ID */ + 1, /* ALL => ID */ + 1, /* COUNT => ID */ + 1, /* SUM => ID */ + 1, /* AVG => ID */ + 1, /* MIN => ID */ + 1, /* MAX => ID */ + 1, /* FIRST => ID */ + 1, /* LAST => ID */ + 1, /* TOP => ID */ + 1, /* BOTTOM => ID */ + 1, /* STDDEV => ID */ + 1, /* PERCENTILE => ID */ + 1, /* APERCENTILE => ID */ + 1, /* LEASTSQUARES => ID */ + 1, /* HISTOGRAM => ID */ + 1, /* DIFF => ID */ + 1, /* SPREAD => ID */ + 1, /* WAVG => ID */ + 1, /* INTERP => ID */ + 1, /* LAST_ROW => ID */ + 1, /* SEMI => ID */ + 1, /* NONE => ID */ + 1, /* PREV => ID */ + 1, /* LINEAR => ID */ + 1, /* IMPORT => ID */ + 1, /* METRIC => ID */ + 1, /* TBNAME => ID */ + 1, /* JOIN => ID */ + 1, /* METRICS => ID */ + 1, /* STABLE => ID */ +}; +#endif /* YYFALLBACK */ + +/* The following structure represents a single element of the +** parser's stack. Information stored includes: +** +** + The state number for the parser at this level of the stack. +** +** + The value of the token stored at this level of the stack. +** (In other words, the "major" token.) +** +** + The semantic value stored at this level of the stack. This is +** the information used by the action routines in the grammar. +** It is sometimes called the "minor" token. +** +** After the "shift" half of a SHIFTREDUCE action, the stateno field +** actually contains the reduce action for the second half of the +** SHIFTREDUCE. +*/ +struct yyStackEntry { + YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */ + YYCODETYPE major; /* The major token value. This is the code + ** number for the token at this stack level */ + YYMINORTYPE minor; /* The user-supplied minor token value. 
This
+                         ** is the value of the token  */
+};
+typedef struct yyStackEntry yyStackEntry;
+
+/* The state of the parser is completely contained in an instance of
+** the following structure */
+struct yyParser {
+  int yyidx;                    /* Index of top element in stack */
+#ifdef YYTRACKMAXSTACKDEPTH
+  int yyidxMax;                 /* Maximum value of yyidx */
+#endif
+  int yyerrcnt;                 /* Shifts left before out of the error */
+  ParseARG_SDECL                /* A place to hold %extra_argument */
+#if YYSTACKDEPTH<=0
+  int yystksz;                  /* Current size of the stack */
+  yyStackEntry *yystack;        /* The parser's stack */
+#else
+  yyStackEntry yystack[YYSTACKDEPTH];  /* The parser's stack */
+#endif
+};
+typedef struct yyParser yyParser;
+
+#ifndef NDEBUG
+#include <stdio.h>
+static FILE *yyTraceFILE = 0;
+static char *yyTracePrompt = 0;
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Turn parser tracing on by giving a stream to which to write the trace
+** and a prompt to preface each trace message.  Tracing is turned off
+** by making either argument NULL
+**
+** Inputs:
+** <ul>
+** <li> A FILE* to which trace output should be written.
+**      If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+**      line of trace output.  If NULL, then tracing is
+**      turned off.
+** </ul>
+** +** Outputs: +** None. +*/ +void ParseTrace(FILE *TraceFILE, char *zTracePrompt){ + yyTraceFILE = TraceFILE; + yyTracePrompt = zTracePrompt; + if( yyTraceFILE==0 ) yyTracePrompt = 0; + else if( yyTracePrompt==0 ) yyTraceFILE = 0; +} +#endif /* NDEBUG */ + +#ifndef NDEBUG +/* For tracing shifts, the names of all terminals and nonterminals +** are required. The following table supplies these names */ +static const char *const yyTokenName[] = { + "$", "ID", "BOOL", "TINYINT", + "SMALLINT", "INTEGER", "BIGINT", "FLOAT", + "DOUBLE", "STRING", "TIMESTAMP", "BINARY", + "NCHAR", "OR", "AND", "NOT", + "EQ", "NE", "ISNULL", "NOTNULL", + "IS", "LIKE", "GLOB", "BETWEEN", + "IN", "GT", "GE", "LT", + "LE", "BITAND", "BITOR", "LSHIFT", + "RSHIFT", "PLUS", "MINUS", "DIVIDE", + "TIMES", "STAR", "SLASH", "REM", + "CONCAT", "UMINUS", "UPLUS", "BITNOT", + "SHOW", "DATABASES", "MNODES", "DNODES", + "USERS", "MODULES", "QUERIES", "CONNECTIONS", + "STREAMS", "CONFIGS", "SCORES", "GRANTS", + "DOT", "TABLES", "STABLES", "VGROUPS", + "DROP", "TABLE", "DATABASE", "USER", + "USE", "DESCRIBE", "ALTER", "PASS", + "PRIVILEGE", "DNODE", "IP", "LOCAL", + "IF", "EXISTS", "CREATE", "KEEP", + "REPLICA", "DAYS", "ROWS", "CACHE", + "ABLOCKS", "TBLOCKS", "CTIME", "CLOG", + "COMP", "PRECISION", "LP", "RP", + "TAGS", "USING", "AS", "COMMA", + "NULL", "SELECT", "FROM", "VARIABLE", + "INTERVAL", "FILL", "SLIDING", "ORDER", + "BY", "ASC", "DESC", "GROUP", + "HAVING", "LIMIT", "OFFSET", "SLIMIT", + "SOFFSET", "WHERE", "NOW", "INSERT", + "INTO", "VALUES", "RESET", "QUERY", + "ADD", "COLUMN", "TAG", "CHANGE", + "SET", "KILL", "CONNECTION", "COLON", + "STREAM", "ABORT", "AFTER", "ATTACH", + "BEFORE", "BEGIN", "CASCADE", "CLUSTER", + "CONFLICT", "COPY", "DEFERRED", "DELIMITERS", + "DETACH", "EACH", "END", "EXPLAIN", + "FAIL", "FOR", "IGNORE", "IMMEDIATE", + "INITIALLY", "INSTEAD", "MATCH", "KEY", + "OF", "RAISE", "REPLACE", "RESTRICT", + "ROW", "STATEMENT", "TRIGGER", "VIEW", + "ALL", "COUNT", "SUM", "AVG", + "MIN", "MAX", "FIRST", "LAST", + "TOP", "BOTTOM", "STDDEV", "PERCENTILE", + "APERCENTILE", "LEASTSQUARES", "HISTOGRAM", "DIFF", + "SPREAD", "WAVG", "INTERP", "LAST_ROW", + "SEMI", "NONE", "PREV", "LINEAR", + "IMPORT", "METRIC", "TBNAME", "JOIN", + "METRICS", "STABLE", "error", "program", + "cmd", "dbPrefix", "ids", "cpxName", + "ifexists", "alter_db_optr", "ifnotexists", "db_optr", + "keep", "tagitemlist", "replica", "day", + "rows", "cache", "ablocks", "tblocks", + "tables", "ctime", "clog", "comp", + "prec", "typename", "signed", "create_table_args", + "columnlist", "select", "column", "tagitem", + "selcollist", "from", "where_opt", "interval_opt", + "fill_opt", "sliding_opt", "groupby_opt", "orderby_opt", + "having_opt", "slimit_opt", "limit_opt", "sclp", + "expr", "as", "tmvar", "sortlist", + "sortitem", "item", "sortorder", "grouplist", + "exprlist", "expritem", "insert_value_list", "itemlist", +}; +#endif /* NDEBUG */ + +#ifndef NDEBUG +/* For tracing reduce actions, the names of all rules are required. 
+*/ +static const char *const yyRuleName[] = { + /* 0 */ "program ::= cmd", + /* 1 */ "cmd ::= SHOW DATABASES", + /* 2 */ "cmd ::= SHOW MNODES", + /* 3 */ "cmd ::= SHOW DNODES", + /* 4 */ "cmd ::= SHOW USERS", + /* 5 */ "cmd ::= SHOW MODULES", + /* 6 */ "cmd ::= SHOW QUERIES", + /* 7 */ "cmd ::= SHOW CONNECTIONS", + /* 8 */ "cmd ::= SHOW STREAMS", + /* 9 */ "cmd ::= SHOW CONFIGS", + /* 10 */ "cmd ::= SHOW SCORES", + /* 11 */ "cmd ::= SHOW GRANTS", + /* 12 */ "dbPrefix ::=", + /* 13 */ "dbPrefix ::= ids DOT", + /* 14 */ "cpxName ::=", + /* 15 */ "cpxName ::= DOT ids", + /* 16 */ "cmd ::= SHOW dbPrefix TABLES", + /* 17 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids", + /* 18 */ "cmd ::= SHOW dbPrefix STABLES", + /* 19 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", + /* 20 */ "cmd ::= SHOW dbPrefix VGROUPS", + /* 21 */ "cmd ::= DROP TABLE ifexists ids cpxName", + /* 22 */ "cmd ::= DROP DATABASE ifexists ids", + /* 23 */ "cmd ::= DROP USER ids", + /* 24 */ "cmd ::= USE ids", + /* 25 */ "cmd ::= DESCRIBE ids cpxName", + /* 26 */ "cmd ::= ALTER USER ids PASS ids", + /* 27 */ "cmd ::= ALTER USER ids PRIVILEGE ids", + /* 28 */ "cmd ::= ALTER DNODE IP ids", + /* 29 */ "cmd ::= ALTER DNODE IP ids ids", + /* 30 */ "cmd ::= ALTER LOCAL ids", + /* 31 */ "cmd ::= ALTER DATABASE ids alter_db_optr", + /* 32 */ "ids ::= ID", + /* 33 */ "ids ::= STRING", + /* 34 */ "ifexists ::= IF EXISTS", + /* 35 */ "ifexists ::=", + /* 36 */ "ifnotexists ::= IF NOT EXISTS", + /* 37 */ "ifnotexists ::=", + /* 38 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", + /* 39 */ "cmd ::= CREATE USER ids PASS ids", + /* 40 */ "keep ::=", + /* 41 */ "keep ::= KEEP tagitemlist", + /* 42 */ "replica ::=", + /* 43 */ "replica ::= REPLICA INTEGER", + /* 44 */ "day ::=", + /* 45 */ "day ::= DAYS INTEGER", + /* 46 */ "rows ::= ROWS INTEGER", + /* 47 */ "rows ::=", + /* 48 */ "cache ::= CACHE INTEGER", + /* 49 */ "cache ::=", + /* 50 */ "ablocks ::= ABLOCKS ID", + /* 51 */ "ablocks ::=", + /* 52 */ "tblocks ::= TBLOCKS INTEGER", + /* 53 */ "tblocks ::=", + /* 54 */ "tables ::= TABLES INTEGER", + /* 55 */ "tables ::=", + /* 56 */ "ctime ::= CTIME INTEGER", + /* 57 */ "ctime ::=", + /* 58 */ "clog ::= CLOG INTEGER", + /* 59 */ "clog ::=", + /* 60 */ "comp ::= COMP INTEGER", + /* 61 */ "comp ::=", + /* 62 */ "prec ::= PRECISION ids", + /* 63 */ "prec ::=", + /* 64 */ "db_optr ::= replica day keep rows cache ablocks tblocks tables ctime clog comp prec", + /* 65 */ "alter_db_optr ::= replica", + /* 66 */ "typename ::= ids", + /* 67 */ "typename ::= ids LP signed RP", + /* 68 */ "signed ::= INTEGER", + /* 69 */ "signed ::= PLUS INTEGER", + /* 70 */ "signed ::= MINUS INTEGER", + /* 71 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", + /* 72 */ "create_table_args ::= LP columnlist RP", + /* 73 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", + /* 74 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", + /* 75 */ "create_table_args ::= AS select", + /* 76 */ "columnlist ::= columnlist COMMA column", + /* 77 */ "columnlist ::= column", + /* 78 */ "column ::= ids typename", + /* 79 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 80 */ "tagitemlist ::= tagitem", + /* 81 */ "tagitem ::= INTEGER", + /* 82 */ "tagitem ::= FLOAT", + /* 83 */ "tagitem ::= STRING", + /* 84 */ "tagitem ::= BOOL", + /* 85 */ "tagitem ::= NULL", + /* 86 */ "tagitem ::= MINUS INTEGER", + /* 87 */ "tagitem ::= MINUS FLOAT", + /* 88 */ "cmd ::= select", + /* 89 */ "select ::= SELECT selcollist from where_opt interval_opt 
fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 90 */ "sclp ::= selcollist COMMA", + /* 91 */ "sclp ::=", + /* 92 */ "selcollist ::= sclp expr as", + /* 93 */ "selcollist ::= sclp STAR", + /* 94 */ "selcollist ::= sclp ID DOT STAR", + /* 95 */ "as ::= AS ids", + /* 96 */ "as ::= ids", + /* 97 */ "as ::=", + /* 98 */ "from ::= FROM ids cpxName", + /* 99 */ "tmvar ::= VARIABLE", + /* 100 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 101 */ "interval_opt ::=", + /* 102 */ "fill_opt ::=", + /* 103 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 104 */ "fill_opt ::= FILL LP ID RP", + /* 105 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 106 */ "sliding_opt ::=", + /* 107 */ "orderby_opt ::=", + /* 108 */ "orderby_opt ::= ORDER BY sortlist", + /* 109 */ "sortlist ::= sortlist COMMA item sortorder", + /* 110 */ "sortlist ::= item sortorder", + /* 111 */ "item ::= ids", + /* 112 */ "sortorder ::= ASC", + /* 113 */ "sortorder ::= DESC", + /* 114 */ "sortorder ::=", + /* 115 */ "groupby_opt ::=", + /* 116 */ "groupby_opt ::= GROUP BY grouplist", + /* 117 */ "grouplist ::= grouplist COMMA item", + /* 118 */ "grouplist ::= item", + /* 119 */ "having_opt ::=", + /* 120 */ "having_opt ::= HAVING expr", + /* 121 */ "limit_opt ::=", + /* 122 */ "limit_opt ::= LIMIT signed", + /* 123 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 124 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 125 */ "slimit_opt ::=", + /* 126 */ "slimit_opt ::= SLIMIT signed", + /* 127 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 128 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 129 */ "where_opt ::=", + /* 130 */ "where_opt ::= WHERE expr", + /* 131 */ "expr ::= LP expr RP", + /* 132 */ "expr ::= ID", + /* 133 */ "expr ::= ID DOT ID", + /* 134 */ "expr ::= INTEGER", + /* 135 */ "expr ::= MINUS INTEGER", + /* 136 */ "expr ::= PLUS INTEGER", + /* 137 */ "expr ::= FLOAT", + /* 138 */ "expr ::= MINUS FLOAT", + /* 139 */ "expr ::= PLUS FLOAT", + /* 140 */ "expr ::= STRING", + /* 141 */ "expr ::= NOW", + /* 142 */ "expr ::= VARIABLE", + /* 143 */ "expr ::= BOOL", + /* 144 */ "expr ::= ID LP exprlist RP", + /* 145 */ "expr ::= ID LP STAR RP", + /* 146 */ "expr ::= expr AND expr", + /* 147 */ "expr ::= expr OR expr", + /* 148 */ "expr ::= expr LT expr", + /* 149 */ "expr ::= expr GT expr", + /* 150 */ "expr ::= expr LE expr", + /* 151 */ "expr ::= expr GE expr", + /* 152 */ "expr ::= expr NE expr", + /* 153 */ "expr ::= expr EQ expr", + /* 154 */ "expr ::= expr PLUS expr", + /* 155 */ "expr ::= expr MINUS expr", + /* 156 */ "expr ::= expr STAR expr", + /* 157 */ "expr ::= expr SLASH expr", + /* 158 */ "expr ::= expr REM expr", + /* 159 */ "expr ::= expr LIKE expr", + /* 160 */ "expr ::= expr IN LP exprlist RP", + /* 161 */ "exprlist ::= exprlist COMMA expritem", + /* 162 */ "exprlist ::= expritem", + /* 163 */ "expritem ::= expr", + /* 164 */ "expritem ::=", + /* 165 */ "cmd ::= INSERT INTO cpxName insert_value_list", + /* 166 */ "insert_value_list ::= VALUES LP itemlist RP", + /* 167 */ "insert_value_list ::= insert_value_list VALUES LP itemlist RP", + /* 168 */ "itemlist ::= itemlist COMMA expr", + /* 169 */ "itemlist ::= expr", + /* 170 */ "cmd ::= RESET QUERY CACHE", + /* 171 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 172 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 173 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 174 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 175 */ "cmd ::= ALTER TABLE ids cpxName 
CHANGE TAG ids ids", + /* 176 */ "cmd ::= ALTER TABLE ids cpxName SET ids EQ tagitem", + /* 177 */ "cmd ::= KILL CONNECTION IP COLON INTEGER", + /* 178 */ "cmd ::= KILL STREAM IP COLON INTEGER COLON INTEGER", + /* 179 */ "cmd ::= KILL QUERY IP COLON INTEGER COLON INTEGER", +}; +#endif /* NDEBUG */ + + +#if YYSTACKDEPTH<=0 +/* +** Try to increase the size of the parser stack. +*/ +static void yyGrowStack(yyParser *p){ + int newSize; + yyStackEntry *pNew; + + newSize = p->yystksz*2 + 100; + pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + if( pNew ){ + p->yystack = pNew; + p->yystksz = newSize; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sStack grows to %d entries!\n", + yyTracePrompt, p->yystksz); + } +#endif + } +} +#endif + +/* Datatype of the argument to the memory allocated passed as the +** second argument to ParseAlloc() below. This can be changed by +** putting an appropriate #define in the %include section of the input +** grammar. +*/ +#ifndef YYMALLOCARGTYPE +# define YYMALLOCARGTYPE size_t +#endif + +/* +** This function allocates a new parser. +** The only argument is a pointer to a function which works like +** malloc. +** +** Inputs: +** A pointer to the function used to allocate memory. +** +** Outputs: +** A pointer to a parser. This pointer is used in subsequent calls +** to Parse and ParseFree. +*/ +void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ + yyParser *pParser; + pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( pParser ){ + pParser->yyidx = -1; +#ifdef YYTRACKMAXSTACKDEPTH + pParser->yyidxMax = 0; +#endif +#if YYSTACKDEPTH<=0 + pParser->yystack = NULL; + pParser->yystksz = 0; + yyGrowStack(pParser); +#endif + } + return pParser; +} + +/* The following function deletes the "minor type" or semantic value +** associated with a symbol. The symbol can be either a terminal +** or nonterminal. "yymajor" is the symbol code, and "yypminor" is +** a pointer to the value to be deleted. The code used to do the +** deletions is derived from the %destructor and/or %token_destructor +** directives of the input grammar. +*/ +static void yy_destructor( + yyParser *yypParser, /* The parser */ + YYCODETYPE yymajor, /* Type code for object to destroy */ + YYMINORTYPE *yypminor /* The object to be destroyed */ +){ + ParseARG_FETCH; + switch( yymajor ){ + /* Here is inserted the actions which take place when a + ** terminal or non-terminal is destroyed. This can happen + ** when the symbol is popped from the stack during a + ** reduce or during error processing or when a parser is + ** being destroyed before it is finished parsing. + ** + ** Note: during a reduce, the only symbols destroyed are those + ** which appear on the RHS of the rule, but which are *not* used + ** inside the C code. 
+ */ +/********* Begin destructor definitions ***************************************/ + case 196: /* keep */ + case 197: /* tagitemlist */ + case 220: /* fill_opt */ + case 222: /* groupby_opt */ + case 223: /* orderby_opt */ + case 231: /* sortlist */ + case 235: /* grouplist */ +{ +tVariantListDestroy((yypminor->yy216)); +} + break; + case 213: /* select */ +{ +destroyQuerySql((yypminor->yy24)); +} + break; + case 216: /* selcollist */ + case 227: /* sclp */ + case 236: /* exprlist */ + case 239: /* itemlist */ +{ +tSQLExprListDestroy((yypminor->yy98)); +} + break; + case 218: /* where_opt */ + case 224: /* having_opt */ + case 228: /* expr */ + case 237: /* expritem */ +{ +tSQLExprDestroy((yypminor->yy370)); +} + break; + case 232: /* sortitem */ +{ +tVariantDestroy(&(yypminor->yy266)); +} + break; +/********* End destructor definitions *****************************************/ + default: break; /* If no destructor action specified: do nothing */ + } +} + +/* +** Pop the parser's stack once. +** +** If there is a destructor routine associated with the token which +** is popped from the stack, then call it. +*/ +static void yy_pop_parser_stack(yyParser *pParser){ + yyStackEntry *yytos; + assert( pParser->yyidx>=0 ); + yytos = &pParser->yystack[pParser->yyidx--]; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sPopping %s\n", + yyTracePrompt, + yyTokenName[yytos->major]); + } +#endif + yy_destructor(pParser, yytos->major, &yytos->minor); +} + +/* +** Deallocate and destroy a parser. Destructors are called for +** all stack elements before shutting the parser down. +** +** If the YYPARSEFREENEVERNULL macro exists (for example because it +** is defined in a %include section of the input grammar) then it is +** assumed that the input pointer is never NULL. +*/ +void ParseFree( + void *p, /* The parser to be deleted */ + void (*freeProc)(void*) /* Function used to reclaim memory */ +){ + yyParser *pParser = (yyParser*)p; +#ifndef YYPARSEFREENEVERNULL + if( pParser==0 ) return; +#endif + while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser); +#if YYSTACKDEPTH<=0 + free(pParser->yystack); +#endif + (*freeProc)((void*)pParser); +} + +/* +** Return the peak depth of the stack for a parser. +*/ +#ifdef YYTRACKMAXSTACKDEPTH +int ParseStackPeak(void *p){ + yyParser *pParser = (yyParser*)p; + return pParser->yyidxMax; +} +#endif + +/* +** Find the appropriate action for a parser given the terminal +** look-ahead token iLookAhead. 
+*/
+static int yy_find_shift_action(
+  yyParser *pParser,        /* The parser */
+  YYCODETYPE iLookAhead     /* The look-ahead token */
+){
+  int i;
+  int stateno = pParser->yystack[pParser->yyidx].stateno;
+
+  if( stateno>=YY_MIN_REDUCE ) return stateno;
+  assert( stateno <= YY_SHIFT_COUNT );
+  do{
+    i = yy_shift_ofst[stateno];
+    if( i==YY_SHIFT_USE_DFLT ) return yy_default[stateno];
+    assert( iLookAhead!=YYNOCODE );
+    i += iLookAhead;
+    if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
+      if( iLookAhead>0 ){
+#ifdef YYFALLBACK
+        YYCODETYPE iFallback;            /* Fallback token */
+        if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
+               && (iFallback = yyFallback[iLookAhead])!=0 ){
+#ifndef NDEBUG
+          if( yyTraceFILE ){
+            fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+               yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
+          }
+#endif
+          assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+          iLookAhead = iFallback;
+          continue;
+        }
+#endif
+#ifdef YYWILDCARD
+        {
+          int j = i - iLookAhead + YYWILDCARD;
+          if(
+#if YY_SHIFT_MIN+YYWILDCARD<0
+            j>=0 &&
+#endif
+#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
+            j<YY_ACTTAB_COUNT &&
+#endif
+            yy_lookahead[j]==YYWILDCARD
+          ){
+#ifndef NDEBUG
+            if( yyTraceFILE ){
+              fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+                 yyTracePrompt, yyTokenName[iLookAhead],
+                 yyTokenName[YYWILDCARD]);
+            }
+#endif /* NDEBUG */
+            return yy_action[j];
+          }
+        }
+#endif /* YYWILDCARD */
+      }
+      return yy_default[stateno];
+    }else{
+      return yy_action[i];
+    }
+  }while(1);
+}
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+*/
+static int yy_find_reduce_action(
+  int stateno,              /* Current state number */
+  YYCODETYPE iLookAhead     /* The look-ahead token */
+){
+  int i;
+#ifdef YYERRORSYMBOL
+  if( stateno>YY_REDUCE_COUNT ){
+    return yy_default[stateno];
+  }
+#else
+  assert( stateno<=YY_REDUCE_COUNT );
+#endif
+  i = yy_reduce_ofst[stateno];
+  assert( i!=YY_REDUCE_USE_DFLT );
+  assert( iLookAhead!=YYNOCODE );
+  i += iLookAhead;
+#ifdef YYERRORSYMBOL
+  if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
+    return yy_default[stateno];
+  }
+#else
+  assert( i>=0 && i<YY_ACTTAB_COUNT );
+  assert( yy_lookahead[i]==iLookAhead );
+#endif
+  return yy_action[i];
+}
+
+/*
+** The following routine is called if the stack overflows.
+*/
+static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){
+   ParseARG_FETCH;
+   yypParser->yyidx--;
+#ifndef NDEBUG
+   if( yyTraceFILE ){
+     fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
+   }
+#endif
+   while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser);
+   /* Here code is inserted which will execute if the parser
+   ** stack ever overflows */
+/******** Begin %stack_overflow code ******************************************/
+/******** End %stack_overflow code ********************************************/
+   ParseARG_STORE; /* Suppress warning about unused %extra_argument var */
+}
+
+/*
+** Print tracing information for a SHIFT action
+*/
+#ifndef NDEBUG
+static void yyTraceShift(yyParser *yypParser, int yyNewState){
+  if( yyTraceFILE ){
+    if( yyNewState<YYNSTATE ){
+      fprintf(yyTraceFILE,"%sShift '%s', go to state %d\n",
+         yyTracePrompt,yyTokenName[yypParser->yystack[yypParser->yyidx].major],
+         yyNewState);
+    }else{
+      fprintf(yyTraceFILE,"%sShift '%s'\n",
+         yyTracePrompt,yyTokenName[yypParser->yystack[yypParser->yyidx].major]);
+    }
+  }
+}
+#else
+# define yyTraceShift(X,Y)
+#endif
+
+/*
+** Perform a shift action.
+*/ +static void yy_shift( + yyParser *yypParser, /* The parser to be shifted */ + int yyNewState, /* The new state to shift in */ + int yyMajor, /* The major token to shift in */ + YYMINORTYPE *yypMinor /* Pointer to the minor token to shift in */ +){ + yyStackEntry *yytos; + yypParser->yyidx++; +#ifdef YYTRACKMAXSTACKDEPTH + if( yypParser->yyidx>yypParser->yyidxMax ){ + yypParser->yyidxMax = yypParser->yyidx; + } +#endif +#if YYSTACKDEPTH>0 + if( yypParser->yyidx>=YYSTACKDEPTH ){ + yyStackOverflow(yypParser, yypMinor); + return; + } +#else + if( yypParser->yyidx>=yypParser->yystksz ){ + yyGrowStack(yypParser); + if( yypParser->yyidx>=yypParser->yystksz ){ + yyStackOverflow(yypParser, yypMinor); + return; + } + } +#endif + yytos = &yypParser->yystack[yypParser->yyidx]; + yytos->stateno = (YYACTIONTYPE)yyNewState; + yytos->major = (YYCODETYPE)yyMajor; + yytos->minor = *yypMinor; + yyTraceShift(yypParser, yyNewState); +} + +/* The following table contains information about every rule that +** is used during the reduce. +*/ +static const struct { + YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ + unsigned char nrhs; /* Number of right-hand side symbols in the rule */ +} yyRuleInfo[] = { + { 187, 1 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 188, 2 }, + { 189, 0 }, + { 189, 2 }, + { 191, 0 }, + { 191, 2 }, + { 188, 3 }, + { 188, 5 }, + { 188, 3 }, + { 188, 5 }, + { 188, 3 }, + { 188, 5 }, + { 188, 4 }, + { 188, 3 }, + { 188, 2 }, + { 188, 3 }, + { 188, 5 }, + { 188, 5 }, + { 188, 4 }, + { 188, 5 }, + { 188, 3 }, + { 188, 4 }, + { 190, 1 }, + { 190, 1 }, + { 192, 2 }, + { 192, 0 }, + { 194, 3 }, + { 194, 0 }, + { 188, 5 }, + { 188, 5 }, + { 196, 0 }, + { 196, 2 }, + { 198, 0 }, + { 198, 2 }, + { 199, 0 }, + { 199, 2 }, + { 200, 2 }, + { 200, 0 }, + { 201, 2 }, + { 201, 0 }, + { 202, 2 }, + { 202, 0 }, + { 203, 2 }, + { 203, 0 }, + { 204, 2 }, + { 204, 0 }, + { 205, 2 }, + { 205, 0 }, + { 206, 2 }, + { 206, 0 }, + { 207, 2 }, + { 207, 0 }, + { 208, 2 }, + { 208, 0 }, + { 195, 12 }, + { 193, 1 }, + { 209, 1 }, + { 209, 4 }, + { 210, 1 }, + { 210, 2 }, + { 210, 2 }, + { 188, 6 }, + { 211, 3 }, + { 211, 7 }, + { 211, 7 }, + { 211, 2 }, + { 212, 3 }, + { 212, 1 }, + { 214, 2 }, + { 197, 3 }, + { 197, 1 }, + { 215, 1 }, + { 215, 1 }, + { 215, 1 }, + { 215, 1 }, + { 215, 1 }, + { 215, 2 }, + { 215, 2 }, + { 188, 1 }, + { 213, 12 }, + { 227, 2 }, + { 227, 0 }, + { 216, 3 }, + { 216, 2 }, + { 216, 4 }, + { 229, 2 }, + { 229, 1 }, + { 229, 0 }, + { 217, 3 }, + { 230, 1 }, + { 219, 4 }, + { 219, 0 }, + { 220, 0 }, + { 220, 6 }, + { 220, 4 }, + { 221, 4 }, + { 221, 0 }, + { 223, 0 }, + { 223, 3 }, + { 231, 4 }, + { 231, 2 }, + { 233, 1 }, + { 234, 1 }, + { 234, 1 }, + { 234, 0 }, + { 222, 0 }, + { 222, 3 }, + { 235, 3 }, + { 235, 1 }, + { 224, 0 }, + { 224, 2 }, + { 226, 0 }, + { 226, 2 }, + { 226, 4 }, + { 226, 4 }, + { 225, 0 }, + { 225, 2 }, + { 225, 4 }, + { 225, 4 }, + { 218, 0 }, + { 218, 2 }, + { 228, 3 }, + { 228, 1 }, + { 228, 3 }, + { 228, 1 }, + { 228, 2 }, + { 228, 2 }, + { 228, 1 }, + { 228, 2 }, + { 228, 2 }, + { 228, 1 }, + { 228, 1 }, + { 228, 1 }, + { 228, 1 }, + { 228, 4 }, + { 228, 4 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 3 }, + { 228, 5 }, + { 236, 3 }, + { 236, 1 }, + { 237, 1 }, + { 237, 0 }, + { 188, 4 }, + { 238, 
4 }, + { 238, 5 }, + { 239, 3 }, + { 239, 1 }, + { 188, 3 }, + { 188, 7 }, + { 188, 7 }, + { 188, 7 }, + { 188, 7 }, + { 188, 8 }, + { 188, 8 }, + { 188, 5 }, + { 188, 7 }, + { 188, 7 }, +}; + +static void yy_accept(yyParser*); /* Forward Declaration */ + +/* +** Perform a reduce action and the shift that must immediately +** follow the reduce. +*/ +static void yy_reduce( + yyParser *yypParser, /* The parser */ + int yyruleno /* Number of the rule by which to reduce */ +){ + int yygoto; /* The next state */ + int yyact; /* The next action */ + YYMINORTYPE yygotominor; /* The LHS of the rule reduced */ + yyStackEntry *yymsp; /* The top of the parser's stack */ + int yysize; /* Amount to pop the stack */ + ParseARG_FETCH; + yymsp = &yypParser->yystack[yypParser->yyidx]; +#ifndef NDEBUG + if( yyTraceFILE && yyruleno>=0 + && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ + yysize = yyRuleInfo[yyruleno].nrhs; + fprintf(yyTraceFILE, "%sReduce [%s], go to state %d.\n", yyTracePrompt, + yyRuleName[yyruleno], yymsp[-yysize].stateno); + } +#endif /* NDEBUG */ + yygotominor = yyzerominor; + + switch( yyruleno ){ + /* Beginning here are the reduction cases. A typical example + ** follows: + ** case 0: + ** #line + ** { ... } // User supplied code + ** #line + ** break; + */ +/********** Begin reduce actions **********************************************/ + case 0: /* program ::= cmd */ +{} + break; + case 1: /* cmd ::= SHOW DATABASES */ +{ setDCLSQLElems(pInfo, SHOW_DATABASES, 0);} + break; + case 2: /* cmd ::= SHOW MNODES */ +{ setDCLSQLElems(pInfo, SHOW_MNODES, 0);} + break; + case 3: /* cmd ::= SHOW DNODES */ +{ setDCLSQLElems(pInfo, SHOW_DNODES, 0);} + break; + case 4: /* cmd ::= SHOW USERS */ +{ setDCLSQLElems(pInfo, SHOW_USERS, 0);} + break; + case 5: /* cmd ::= SHOW MODULES */ +{ setDCLSQLElems(pInfo, SHOW_MODULES, 0); } + break; + case 6: /* cmd ::= SHOW QUERIES */ +{ setDCLSQLElems(pInfo, SHOW_QUERIES, 0); } + break; + case 7: /* cmd ::= SHOW CONNECTIONS */ +{ setDCLSQLElems(pInfo, SHOW_CONNECTIONS, 0);} + break; + case 8: /* cmd ::= SHOW STREAMS */ +{ setDCLSQLElems(pInfo, SHOW_STREAMS, 0); } + break; + case 9: /* cmd ::= SHOW CONFIGS */ +{ setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); } + break; + case 10: /* cmd ::= SHOW SCORES */ +{ setDCLSQLElems(pInfo, SHOW_SCORES, 0); } + break; + case 11: /* cmd ::= SHOW GRANTS */ +{ setDCLSQLElems(pInfo, SHOW_GRANTS, 0); } + break; + case 12: /* dbPrefix ::= */ + case 35: /* ifexists ::= */ yytestcase(yyruleno==35); + case 37: /* ifnotexists ::= */ yytestcase(yyruleno==37); +{yygotominor.yy0.n = 0;} + break; + case 13: /* dbPrefix ::= ids DOT */ +{yygotominor.yy0 = yymsp[-1].minor.yy0; } + break; + case 14: /* cpxName ::= */ +{yygotominor.yy0.n = 0; } + break; + case 15: /* cpxName ::= DOT ids */ +{yygotominor.yy0 = yymsp[0].minor.yy0; yygotominor.yy0.n += 1; } + break; + case 16: /* cmd ::= SHOW dbPrefix TABLES */ +{ + setDCLSQLElems(pInfo, SHOW_TABLES, 1, &yymsp[-1].minor.yy0); +} + break; + case 17: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ +{ + setDCLSQLElems(pInfo, SHOW_TABLES, 2, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); +} + break; + case 18: /* cmd ::= SHOW dbPrefix STABLES */ +{ + setDCLSQLElems(pInfo, SHOW_STABLES, 1, &yymsp[-1].minor.yy0); +} + break; + case 19: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ +{ + SSQLToken token; + setDBName(&token, &yymsp[-3].minor.yy0); + setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &yymsp[0].minor.yy0); +} + break; + case 20: /* cmd ::= SHOW dbPrefix VGROUPS */ +{ + SSQLToken token; + 
setDBName(&token, &yymsp[-1].minor.yy0); + setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token); +} + break; + case 21: /* cmd ::= DROP TABLE ifexists ids cpxName */ +{ + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + setDCLSQLElems(pInfo, DROP_TABLE, 2, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); +} + break; + case 22: /* cmd ::= DROP DATABASE ifexists ids */ +{ setDCLSQLElems(pInfo, DROP_DATABASE, 2, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } + break; + case 23: /* cmd ::= DROP USER ids */ +{ setDCLSQLElems(pInfo, DROP_USER, 1, &yymsp[0].minor.yy0); } + break; + case 24: /* cmd ::= USE ids */ +{ setDCLSQLElems(pInfo, USE_DATABASE, 1, &yymsp[0].minor.yy0);} + break; + case 25: /* cmd ::= DESCRIBE ids cpxName */ +{ + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + setDCLSQLElems(pInfo, DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); +} + break; + case 26: /* cmd ::= ALTER USER ids PASS ids */ +{ setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } + break; + case 27: /* cmd ::= ALTER USER ids PRIVILEGE ids */ +{ setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} + break; + case 28: /* cmd ::= ALTER DNODE IP ids */ +{ setDCLSQLElems(pInfo, ALTER_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } + break; + case 29: /* cmd ::= ALTER DNODE IP ids ids */ +{ setDCLSQLElems(pInfo, ALTER_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } + break; + case 30: /* cmd ::= ALTER LOCAL ids */ +{ setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &yymsp[0].minor.yy0); } + break; + case 31: /* cmd ::= ALTER DATABASE ids alter_db_optr */ +{ SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy54, &t);} + break; + case 32: /* ids ::= ID */ + case 33: /* ids ::= STRING */ yytestcase(yyruleno==33); +{yygotominor.yy0 = yymsp[0].minor.yy0; } + break; + case 34: /* ifexists ::= IF EXISTS */ + case 36: /* ifnotexists ::= IF NOT EXISTS */ yytestcase(yyruleno==36); +{yygotominor.yy0.n = 1;} + break; + case 38: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ +{ setCreateDBSQL(pInfo, CREATE_DATABASE, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy54, &yymsp[-2].minor.yy0);} + break; + case 39: /* cmd ::= CREATE USER ids PASS ids */ +{ setDCLSQLElems(pInfo, CREATE_USER, 2, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} + break; + case 40: /* keep ::= */ + case 102: /* fill_opt ::= */ yytestcase(yyruleno==102); +{yygotominor.yy216 = 0; } + break; + case 41: /* keep ::= KEEP tagitemlist */ +{yygotominor.yy216 = yymsp[0].minor.yy216; } + break; + case 42: /* replica ::= */ + case 44: /* day ::= */ yytestcase(yyruleno==44); + case 47: /* rows ::= */ yytestcase(yyruleno==47); + case 49: /* cache ::= */ yytestcase(yyruleno==49); + case 51: /* ablocks ::= */ yytestcase(yyruleno==51); + case 53: /* tblocks ::= */ yytestcase(yyruleno==53); + case 55: /* tables ::= */ yytestcase(yyruleno==55); + case 57: /* ctime ::= */ yytestcase(yyruleno==57); + case 59: /* clog ::= */ yytestcase(yyruleno==59); + case 61: /* comp ::= */ yytestcase(yyruleno==61); + case 63: /* prec ::= */ yytestcase(yyruleno==63); + case 101: /* interval_opt ::= */ yytestcase(yyruleno==101); + case 106: /* sliding_opt ::= */ yytestcase(yyruleno==106); +{yygotominor.yy0.n = 0; } + break; + case 43: /* replica ::= REPLICA INTEGER */ + case 45: /* day ::= DAYS INTEGER */ yytestcase(yyruleno==45); + case 46: /* rows ::= ROWS INTEGER */ yytestcase(yyruleno==46); + case 48: /* cache ::= CACHE INTEGER */ yytestcase(yyruleno==48); + 
case 50: /* ablocks ::= ABLOCKS ID */ yytestcase(yyruleno==50); + case 52: /* tblocks ::= TBLOCKS INTEGER */ yytestcase(yyruleno==52); + case 54: /* tables ::= TABLES INTEGER */ yytestcase(yyruleno==54); + case 56: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==56); + case 58: /* clog ::= CLOG INTEGER */ yytestcase(yyruleno==58); + case 60: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==60); + case 62: /* prec ::= PRECISION ids */ yytestcase(yyruleno==62); +{yygotominor.yy0 = yymsp[0].minor.yy0; } + break; + case 64: /* db_optr ::= replica day keep rows cache ablocks tblocks tables ctime clog comp prec */ +{ + yygotominor.yy54.nReplica = (yymsp[-11].minor.yy0.n > 0)? atoi(yymsp[-11].minor.yy0.z):-1; + yygotominor.yy54.nDays = (yymsp[-10].minor.yy0.n > 0)? atoi(yymsp[-10].minor.yy0.z):-1; + yygotominor.yy54.nRowsInFileBlock = (yymsp[-8].minor.yy0.n > 0)? atoi(yymsp[-8].minor.yy0.z):-1; + + yygotominor.yy54.nCacheBlockSize = (yymsp[-7].minor.yy0.n > 0)? atoi(yymsp[-7].minor.yy0.z):-1; + yygotominor.yy54.nCacheNumOfBlocks = (yymsp[-6].minor.yy0.n > 0)? strtod(yymsp[-6].minor.yy0.z, NULL):-1; + yygotominor.yy54.numOfBlocksPerTable = (yymsp[-5].minor.yy0.n > 0)? atoi(yymsp[-5].minor.yy0.z):-1; + yygotominor.yy54.nTablesPerVnode = (yymsp[-4].minor.yy0.n > 0)? atoi(yymsp[-4].minor.yy0.z):-1; + yygotominor.yy54.commitTime = (yymsp[-3].minor.yy0.n > 0)? atoi(yymsp[-3].minor.yy0.z):-1; + yygotominor.yy54.commitLog = (yymsp[-2].minor.yy0.n > 0)? atoi(yymsp[-2].minor.yy0.z):-1; + yygotominor.yy54.compressionLevel = (yymsp[-1].minor.yy0.n > 0)? atoi(yymsp[-1].minor.yy0.z):-1; + + yygotominor.yy54.keep = yymsp[-9].minor.yy216; + yygotominor.yy54.precision = yymsp[0].minor.yy0; +} + break; + case 65: /* alter_db_optr ::= replica */ +{ + yygotominor.yy54.nReplica = (yymsp[0].minor.yy0.n > 0)? 
atoi(yymsp[0].minor.yy0.z):0; +} + break; + case 66: /* typename ::= ids */ +{ tSQLSetColumnType (&yygotominor.yy343, &yymsp[0].minor.yy0); } + break; + case 67: /* typename ::= ids LP signed RP */ +{ + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy412; // negative value of name length + tSQLSetColumnType(&yygotominor.yy343, &yymsp[-3].minor.yy0); +} + break; + case 68: /* signed ::= INTEGER */ +{ yygotominor.yy412 = atoi(yymsp[0].minor.yy0.z); } + break; + case 69: /* signed ::= PLUS INTEGER */ +{ yygotominor.yy412 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + break; + case 70: /* signed ::= MINUS INTEGER */ +{ yygotominor.yy412 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} + break; + case 71: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ +{ + yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; + setCreatedMeterName(pInfo, &yymsp[-2].minor.yy0, &yymsp[-3].minor.yy0); +} + break; + case 72: /* create_table_args ::= LP columnlist RP */ +{ + yygotominor.yy278 = tSetCreateSQLElems(yymsp[-1].minor.yy151, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER); + setSQLInfo(pInfo, yygotominor.yy278, NULL, TSQL_CREATE_NORMAL_METER); +} + break; + case 73: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ +{ + yygotominor.yy278 = tSetCreateSQLElems(yymsp[-5].minor.yy151, yymsp[-1].minor.yy151, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC); + setSQLInfo(pInfo, yygotominor.yy278, NULL, TSQL_CREATE_NORMAL_METRIC); +} + break; + case 74: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ +{ + yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; + yygotominor.yy278 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy216, NULL, TSQL_CREATE_METER_FROM_METRIC); + setSQLInfo(pInfo, yygotominor.yy278, NULL, TSQL_CREATE_METER_FROM_METRIC); +} + break; + case 75: /* create_table_args ::= AS select */ +{ + yygotominor.yy278 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy24, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, yygotominor.yy278, NULL, TSQL_CREATE_STREAM); +} + break; + case 76: /* columnlist ::= columnlist COMMA column */ +{yygotominor.yy151 = tFieldListAppend(yymsp[-2].minor.yy151, &yymsp[0].minor.yy343); } + break; + case 77: /* columnlist ::= column */ +{yygotominor.yy151 = tFieldListAppend(NULL, &yymsp[0].minor.yy343);} + break; + case 78: /* column ::= ids typename */ +{ + tSQLSetColumnInfo(&yygotominor.yy343, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy343); +} + break; + case 79: /* tagitemlist ::= tagitemlist COMMA tagitem */ +{ yygotominor.yy216 = tVariantListAppend(yymsp[-2].minor.yy216, &yymsp[0].minor.yy266, -1); } + break; + case 80: /* tagitemlist ::= tagitem */ +{ yygotominor.yy216 = tVariantListAppend(NULL, &yymsp[0].minor.yy266, -1); } + break; + case 81: /* tagitem ::= INTEGER */ + case 82: /* tagitem ::= FLOAT */ yytestcase(yyruleno==82); + case 83: /* tagitem ::= STRING */ yytestcase(yyruleno==83); + case 84: /* tagitem ::= BOOL */ yytestcase(yyruleno==84); +{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yygotominor.yy266, &yymsp[0].minor.yy0); } + break; + case 85: /* tagitem ::= NULL */ +{ yymsp[0].minor.yy0.type = TK_STRING; toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yygotominor.yy266, &yymsp[0].minor.yy0); } + break; + case 86: /* tagitem ::= MINUS INTEGER */ + case 87: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==87); +{ + yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; + yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; + toTSDBType(yymsp[-1].minor.yy0.type); + 
tVariantCreate(&yygotominor.yy266, &yymsp[-1].minor.yy0); +} + break; + case 88: /* cmd ::= select */ +{ + setSQLInfo(pInfo, yymsp[0].minor.yy24, NULL, TSQL_QUERY_METER); +} + break; + case 89: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ +{ + yygotominor.yy24 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy98, &yymsp[-9].minor.yy0, yymsp[-8].minor.yy370, yymsp[-4].minor.yy216, yymsp[-3].minor.yy216, &yymsp[-7].minor.yy0, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy216, &yymsp[0].minor.yy294, &yymsp[-1].minor.yy294); +} + break; + case 90: /* sclp ::= selcollist COMMA */ +{yygotominor.yy98 = yymsp[-1].minor.yy98;} + break; + case 91: /* sclp ::= */ +{yygotominor.yy98 = 0;} + break; + case 92: /* selcollist ::= sclp expr as */ +{ + yygotominor.yy98 = tSQLExprListAppend(yymsp[-2].minor.yy98, yymsp[-1].minor.yy370, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); +} + break; + case 93: /* selcollist ::= sclp STAR */ +{ + tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); + yygotominor.yy98 = tSQLExprListAppend(yymsp[-1].minor.yy98, pNode, 0); +} + break; + case 94: /* selcollist ::= sclp ID DOT STAR */ +{ + tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); + yygotominor.yy98 = tSQLExprListAppend(yymsp[-3].minor.yy98, pNode, 0); +} + break; + case 95: /* as ::= AS ids */ + case 96: /* as ::= ids */ yytestcase(yyruleno==96); +{ yygotominor.yy0 = yymsp[0].minor.yy0; } + break; + case 97: /* as ::= */ +{ yygotominor.yy0.n = 0; } + break; + case 98: /* from ::= FROM ids cpxName */ +{yygotominor.yy0 = yymsp[-1].minor.yy0; yygotominor.yy0.n += yymsp[0].minor.yy0.n;} + break; + case 99: /* tmvar ::= VARIABLE */ +{yygotominor.yy0 = yymsp[0].minor.yy0;} + break; + case 100: /* interval_opt ::= INTERVAL LP tmvar RP */ + case 105: /* sliding_opt ::= SLIDING LP tmvar RP */ yytestcase(yyruleno==105); +{yygotominor.yy0 = yymsp[-1].minor.yy0; } + break; + case 103: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ +{ + tVariant A = {0}; + toTSDBType(yymsp[-3].minor.yy0.type); + tVariantCreate(&A, &yymsp[-3].minor.yy0); + + tVariantListInsert(yymsp[-1].minor.yy216, &A, -1, 0); + yygotominor.yy216 = yymsp[-1].minor.yy216; +} + break; + case 104: /* fill_opt ::= FILL LP ID RP */ +{ + tVariant A = {0}; + toTSDBType(yymsp[-1].minor.yy0.type); + tVariantCreate(&A, &yymsp[-1].minor.yy0); + + yygotominor.yy216 = tVariantListAppend(NULL, &A, -1); +} + break; + case 107: /* orderby_opt ::= */ + case 115: /* groupby_opt ::= */ yytestcase(yyruleno==115); +{yygotominor.yy216 = 0;} + break; + case 108: /* orderby_opt ::= ORDER BY sortlist */ + case 116: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==116); +{yygotominor.yy216 = yymsp[0].minor.yy216;} + break; + case 109: /* sortlist ::= sortlist COMMA item sortorder */ +{ + yygotominor.yy216 = tVariantListAppend(yymsp[-3].minor.yy216, &yymsp[-1].minor.yy266, yymsp[0].minor.yy412); +} + break; + case 110: /* sortlist ::= item sortorder */ +{ + yygotominor.yy216 = tVariantListAppend(NULL, &yymsp[-1].minor.yy266, yymsp[0].minor.yy412); +} + break; + case 111: /* item ::= ids */ +{ + toTSDBType(yymsp[0].minor.yy0.type); + tVariantCreate(&yygotominor.yy266, &yymsp[0].minor.yy0); +} + break; + case 112: /* sortorder ::= ASC */ +{yygotominor.yy412 = TSQL_SO_ASC; } + break; + case 113: /* sortorder ::= DESC */ +{yygotominor.yy412 = TSQL_SO_DESC;} + break; + case 114: /* sortorder ::= */ +{yygotominor.yy412 = TSQL_SO_ASC;} + break; + case 117: /* grouplist 
::= grouplist COMMA item */ +{ + yygotominor.yy216 = tVariantListAppend(yymsp[-2].minor.yy216, &yymsp[0].minor.yy266, -1); +} + break; + case 118: /* grouplist ::= item */ +{ + yygotominor.yy216 = tVariantListAppend(NULL, &yymsp[0].minor.yy266, -1); +} + break; + case 119: /* having_opt ::= */ + case 129: /* where_opt ::= */ yytestcase(yyruleno==129); + case 164: /* expritem ::= */ yytestcase(yyruleno==164); +{yygotominor.yy370 = 0;} + break; + case 120: /* having_opt ::= HAVING expr */ + case 130: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==130); + case 163: /* expritem ::= expr */ yytestcase(yyruleno==163); +{yygotominor.yy370 = yymsp[0].minor.yy370;} + break; + case 121: /* limit_opt ::= */ + case 125: /* slimit_opt ::= */ yytestcase(yyruleno==125); +{yygotominor.yy294.limit = -1; yygotominor.yy294.offset = 0;} + break; + case 122: /* limit_opt ::= LIMIT signed */ + case 126: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==126); +{yygotominor.yy294.limit = yymsp[0].minor.yy412; yygotominor.yy294.offset = 0;} + break; + case 123: /* limit_opt ::= LIMIT signed OFFSET signed */ + case 127: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ yytestcase(yyruleno==127); +{yygotominor.yy294.limit = yymsp[-2].minor.yy412; yygotominor.yy294.offset = yymsp[0].minor.yy412;} + break; + case 124: /* limit_opt ::= LIMIT signed COMMA signed */ + case 128: /* slimit_opt ::= SLIMIT signed COMMA signed */ yytestcase(yyruleno==128); +{yygotominor.yy294.limit = yymsp[0].minor.yy412; yygotominor.yy294.offset = yymsp[-2].minor.yy412;} + break; + case 131: /* expr ::= LP expr RP */ +{yygotominor.yy370 = yymsp[-1].minor.yy370; } + break; + case 132: /* expr ::= ID */ +{yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + break; + case 133: /* expr ::= ID DOT ID */ +{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + break; + case 134: /* expr ::= INTEGER */ +{yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + break; + case 135: /* expr ::= MINUS INTEGER */ + case 136: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==136); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + break; + case 137: /* expr ::= FLOAT */ +{yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + break; + case 138: /* expr ::= MINUS FLOAT */ + case 139: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==139); +{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + break; + case 140: /* expr ::= STRING */ +{yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + break; + case 141: /* expr ::= NOW */ +{yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + break; + case 142: /* expr ::= VARIABLE */ +{yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + break; + case 143: /* expr ::= BOOL */ +{yygotominor.yy370 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + break; + case 144: /* expr ::= ID LP exprlist RP */ +{ + yygotominor.yy370 = tSQLExprCreateFunction(yymsp[-1].minor.yy98, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); +} + break; + case 145: /* expr ::= ID LP STAR RP */ +{ + yygotominor.yy370 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, 
&yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); +} + break; + case 146: /* expr ::= expr AND expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_AND);} + break; + case 147: /* expr ::= expr OR expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_OR); } + break; + case 148: /* expr ::= expr LT expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_LT);} + break; + case 149: /* expr ::= expr GT expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_GT);} + break; + case 150: /* expr ::= expr LE expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_LE);} + break; + case 151: /* expr ::= expr GE expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_GE);} + break; + case 152: /* expr ::= expr NE expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_NE);} + break; + case 153: /* expr ::= expr EQ expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_EQ);} + break; + case 154: /* expr ::= expr PLUS expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_PLUS); } + break; + case 155: /* expr ::= expr MINUS expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_MINUS); } + break; + case 156: /* expr ::= expr STAR expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_STAR); } + break; + case 157: /* expr ::= expr SLASH expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_DIVIDE);} + break; + case 158: /* expr ::= expr REM expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_REM); } + break; + case 159: /* expr ::= expr LIKE expr */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_LIKE); } + break; + case 160: /* expr ::= expr IN LP exprlist RP */ +{yygotominor.yy370 = tSQLExprCreate(yymsp[-4].minor.yy370, (tSQLExpr*)yymsp[-1].minor.yy98, TK_IN); } + break; + case 161: /* exprlist ::= exprlist COMMA expritem */ + case 168: /* itemlist ::= itemlist COMMA expr */ yytestcase(yyruleno==168); +{yygotominor.yy98 = tSQLExprListAppend(yymsp[-2].minor.yy98,yymsp[0].minor.yy370,0);} + break; + case 162: /* exprlist ::= expritem */ + case 169: /* itemlist ::= expr */ yytestcase(yyruleno==169); +{yygotominor.yy98 = tSQLExprListAppend(0,yymsp[0].minor.yy370,0);} + break; + case 165: /* cmd ::= INSERT INTO cpxName insert_value_list */ +{ + tSetInsertSQLElems(pInfo, &yymsp[-1].minor.yy0, yymsp[0].minor.yy434); +} + break; + case 166: /* insert_value_list ::= VALUES LP itemlist RP */ +{yygotominor.yy434 = tSQLListListAppend(NULL, yymsp[-1].minor.yy98);} + break; + case 167: /* insert_value_list ::= insert_value_list VALUES LP itemlist RP */ +{yygotominor.yy434 = tSQLListListAppend(yymsp[-4].minor.yy434, yymsp[-1].minor.yy98);} + break; + case 170: /* cmd ::= RESET QUERY CACHE */ +{ setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);} + break; + case 171: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ +{ + yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; + SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy151, NULL, ALTER_TABLE_ADD_COLUMN); + setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN); +} + break; + case 172: /* cmd ::= 
ALTER TABLE ids cpxName DROP COLUMN ids */
+{
+    yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
+
+    toTSDBType(yymsp[0].minor.yy0.type);
+
+    tVariant V;
+    tVariantCreate(&V, &yymsp[0].minor.yy0);
+
+    tVariantList* K = tVariantListAppend(NULL, &V, -1);
+
+    SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, ALTER_TABLE_DROP_COLUMN);
+    setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN);
+}
+        break;
+      case 173: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+{
+    yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
+    SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy151, NULL, ALTER_TABLE_TAGS_ADD);
+    setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD);
+}
+        break;
+      case 174: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+{
+    yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
+
+    toTSDBType(yymsp[0].minor.yy0.type);
+
+    tVariant V;
+    tVariantCreate(&V, &yymsp[0].minor.yy0);
+
+    tVariantList* A = tVariantListAppend(NULL, &V, -1);
+
+    SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, ALTER_TABLE_TAGS_DROP);
+    setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP);
+}
+        break;
+      case 175: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+{
+    yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
+
+    tVariant V;
+    toTSDBType(yymsp[-1].minor.yy0.type);
+    tVariantCreate(&V, &yymsp[-1].minor.yy0);
+
+    tVariantList* A = tVariantListAppend(NULL, &V, -1);
+
+    toTSDBType(yymsp[0].minor.yy0.type);
+    tVariantCreate(&V, &yymsp[0].minor.yy0);
+    A = tVariantListAppend(A, &V, -1);
+
+    SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-5].minor.yy0, NULL, A, ALTER_TABLE_TAGS_CHG);
+    setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG);
+}
+        break;
+      case 176: /* cmd ::= ALTER TABLE ids cpxName SET ids EQ tagitem */
+{
+    yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
+
+    tVariant V;
+    toTSDBType(yymsp[-2].minor.yy0.type);
+    tVariantCreate(&V, &yymsp[-2].minor.yy0);
+
+    tVariantList* A = tVariantListAppend(NULL, &V, -1);
+    A = tVariantListAppend(A, &yymsp[0].minor.yy266, -1);
+
+    SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-5].minor.yy0, NULL, A, ALTER_TABLE_TAGS_SET);
+    setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET);
+}
+        break;
+      case 177: /* cmd ::= KILL CONNECTION IP COLON INTEGER */
+{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &yymsp[-2].minor.yy0);}
+        break;
+      case 178: /* cmd ::= KILL STREAM IP COLON INTEGER COLON INTEGER */
+{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &yymsp[-4].minor.yy0);}
+        break;
+      case 179: /* cmd ::= KILL QUERY IP COLON INTEGER COLON INTEGER */
+{yymsp[-4].minor.yy0.n += (yymsp[-3].minor.yy0.n + yymsp[-2].minor.yy0.n + yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &yymsp[-4].minor.yy0);}
+        break;
+      default:
+        break;
+/********** End reduce actions ************************************************/
+  };
+  assert( yyruleno>=0 && yyruleno<sizeof(yyRuleInfo)/sizeof(yyRuleInfo[0]) );
+  yygoto = yyRuleInfo[yyruleno].lhs;
+  yysize = yyRuleInfo[yyruleno].nrhs;
+  yypParser->yyidx -= yysize;
+  yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto);
+  if( yyact <= YY_MAX_SHIFTREDUCE ){
+    if( yyact>YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
+    /* If the reduce action popped at least
+    ** one element off the stack, then we can push the new element back
+    ** onto the stack here, and skip the stack overflow
test in yy_shift(). + ** That gives a significant speed improvement. */ + if( yysize ){ + yypParser->yyidx++; + yymsp -= yysize-1; + yymsp->stateno = (YYACTIONTYPE)yyact; + yymsp->major = (YYCODETYPE)yygoto; + yymsp->minor = yygotominor; + yyTraceShift(yypParser, yyact); + }else{ + yy_shift(yypParser,yyact,yygoto,&yygotominor); + } + }else{ + assert( yyact == YY_ACCEPT_ACTION ); + yy_accept(yypParser); + } +} + +/* +** The following code executes when the parse fails +*/ +#ifndef YYNOERRORRECOVERY +static void yy_parse_failed( + yyParser *yypParser /* The parser */ +){ + ParseARG_FETCH; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); + } +#endif + while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); + /* Here code is inserted which will be executed whenever the + ** parser fails */ +/************ Begin %parse_failure code ***************************************/ +/************ End %parse_failure code *****************************************/ + ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ +} +#endif /* YYNOERRORRECOVERY */ + +/* +** The following code executes when a syntax error first occurs. +*/ +static void yy_syntax_error( + yyParser *yypParser, /* The parser */ + int yymajor, /* The major type of the error token */ + YYMINORTYPE yyminor /* The minor type of the error token */ +){ + ParseARG_FETCH; +#define TOKEN (yyminor.yy0) +/************ Begin %syntax_error code ****************************************/ + + pInfo->validSql = false; + int32_t outputBufLen = tListLen(pInfo->pzErrMsg); + int32_t len = 0; + + if(TOKEN.z) { + char msg[] = "syntax error near \"%s\""; + int32_t sqlLen = strlen(&TOKEN.z[0]); + + if (sqlLen + sizeof(msg)/sizeof(msg[0]) + 1 > outputBufLen) { + char tmpstr[128] = {0}; + memcpy(tmpstr, &TOKEN.z[0], sizeof(tmpstr)/sizeof(tmpstr[0]) - 1); + len = sprintf(pInfo->pzErrMsg, msg, tmpstr); + } else { + len = sprintf(pInfo->pzErrMsg, msg, &TOKEN.z[0]); + } + + } else { + len = sprintf(pInfo->pzErrMsg, "Incomplete SQL statement"); + } + + assert(len <= outputBufLen); +/************ End %syntax_error code ******************************************/ + ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ +} + +/* +** The following is executed when the parser accepts +*/ +static void yy_accept( + yyParser *yypParser /* The parser */ +){ + ParseARG_FETCH; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); + } +#endif + while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); + /* Here code is inserted which will be executed whenever the + ** parser accepts */ +/*********** Begin %parse_accept code *****************************************/ + +/*********** End %parse_accept code *******************************************/ + ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ +} + +/* The main parser program. +** The first argument is a pointer to a structure obtained from +** "ParseAlloc" which describes the current state of the parser. +** The second argument is the major token number. The third is +** the minor token. The fourth optional argument is whatever the +** user wants (and specified in the grammar) and is available for +** use by the action routines. +** +** Inputs: +**
+** <ul>
+** <li> A pointer to the parser (an opaque structure.)
+** <li> The major token number.
+** <li> The minor token number.
+** <li> An option argument of a grammar-specified type.
+** </ul>
+** +** Outputs: +** None. +*/ +void Parse( + void *yyp, /* The parser */ + int yymajor, /* The major token code number */ + ParseTOKENTYPE yyminor /* The value for the token */ + ParseARG_PDECL /* Optional %extra_argument parameter */ +){ + YYMINORTYPE yyminorunion; + int yyact; /* The parser action. */ +#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) + int yyendofinput; /* True if we are at the end of input */ +#endif +#ifdef YYERRORSYMBOL + int yyerrorhit = 0; /* True if yymajor has invoked an error */ +#endif + yyParser *yypParser; /* The parser */ + + /* (re)initialize the parser, if necessary */ + yypParser = (yyParser*)yyp; + if( yypParser->yyidx<0 ){ +#if YYSTACKDEPTH<=0 + if( yypParser->yystksz <=0 ){ + /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/ + yyminorunion = yyzerominor; + yyStackOverflow(yypParser, &yyminorunion); + return; + } +#endif + yypParser->yyidx = 0; + yypParser->yyerrcnt = -1; + yypParser->yystack[0].stateno = 0; + yypParser->yystack[0].major = 0; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sInitialize. Empty stack. State 0\n", + yyTracePrompt); + } +#endif + } + yyminorunion.yy0 = yyminor; +#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) + yyendofinput = (yymajor==0); +#endif + ParseARG_STORE; + +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sInput '%s'\n",yyTracePrompt,yyTokenName[yymajor]); + } +#endif + + do{ + yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); + if( yyact <= YY_MAX_SHIFTREDUCE ){ + if( yyact > YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; + yy_shift(yypParser,yyact,yymajor,&yyminorunion); + yypParser->yyerrcnt--; + yymajor = YYNOCODE; + }else if( yyact <= YY_MAX_REDUCE ){ + yy_reduce(yypParser,yyact-YY_MIN_REDUCE); + }else{ + assert( yyact == YY_ERROR_ACTION ); +#ifdef YYERRORSYMBOL + int yymx; +#endif +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt); + } +#endif +#ifdef YYERRORSYMBOL + /* A syntax error has occurred. + ** The response to an error depends upon whether or not the + ** grammar defines an error token "ERROR". + ** + ** This is what we do if the grammar does define ERROR: + ** + ** * Call the %syntax_error function. + ** + ** * Begin popping the stack until we enter a state where + ** it is legal to shift the error symbol, then shift + ** the error symbol. + ** + ** * Set the error count to three. + ** + ** * Begin accepting and shifting new tokens. No new error + ** processing will occur until three tokens have been + ** shifted successfully. 
+ ** + */ + if( yypParser->yyerrcnt<0 ){ + yy_syntax_error(yypParser,yymajor,yyminorunion); + } + yymx = yypParser->yystack[yypParser->yyidx].major; + if( yymx==YYERRORSYMBOL || yyerrorhit ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sDiscard input token %s\n", + yyTracePrompt,yyTokenName[yymajor]); + } +#endif + yy_destructor(yypParser, (YYCODETYPE)yymajor,&yyminorunion); + yymajor = YYNOCODE; + }else{ + while( + yypParser->yyidx >= 0 && + yymx != YYERRORSYMBOL && + (yyact = yy_find_reduce_action( + yypParser->yystack[yypParser->yyidx].stateno, + YYERRORSYMBOL)) >= YY_MIN_REDUCE + ){ + yy_pop_parser_stack(yypParser); + } + if( yypParser->yyidx < 0 || yymajor==0 ){ + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + yy_parse_failed(yypParser); + yymajor = YYNOCODE; + }else if( yymx!=YYERRORSYMBOL ){ + YYMINORTYPE u2; + u2.YYERRSYMDT = 0; + yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2); + } + } + yypParser->yyerrcnt = 3; + yyerrorhit = 1; +#elif defined(YYNOERRORRECOVERY) + /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to + ** do any kind of error recovery. Instead, simply invoke the syntax + ** error routine and continue going as if nothing had happened. + ** + ** Applications can set this macro (for example inside %include) if + ** they intend to abandon the parse upon the first syntax error seen. + */ + yy_syntax_error(yypParser,yymajor,yyminorunion); + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + yymajor = YYNOCODE; + +#else /* YYERRORSYMBOL is not defined */ + /* This is what we do if the grammar does not define ERROR: + ** + ** * Report an error message, and throw away the input token. + ** + ** * If the input token is $, then fail the parse. + ** + ** As before, subsequent error messages are suppressed until + ** three input tokens have been successfully shifted. + */ + if( yypParser->yyerrcnt<=0 ){ + yy_syntax_error(yypParser,yymajor,yyminorunion); + } + yypParser->yyerrcnt = 3; + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + if( yyendofinput ){ + yy_parse_failed(yypParser); + } + yymajor = YYNOCODE; +#endif + } + }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 ); +#ifndef NDEBUG + if( yyTraceFILE ){ + int i; + fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt); + for(i=1; i<=yypParser->yyidx; i++) + fprintf(yyTraceFILE,"%c%s", i==1 ? '[' : ' ', + yyTokenName[yypParser->yystack[i].major]); + fprintf(yyTraceFILE,"]\n"); + } +#endif + return; +} diff --git a/src/util/src/tbase64.c b/src/util/src/tbase64.c new file mode 100644 index 000000000000..02ec756e0446 --- /dev/null +++ b/src/util/src/tbase64.c @@ -0,0 +1,105 @@ +/** + * Copyright (c) 2006-2008 Apple Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ **/ + +#include +#include +#include +#include + +// deprecated this file for bug prone +// base64 encode +static char basis_64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +char *base64_encode(const unsigned char *value, int vlen) { + unsigned char oval = 0; + char * result = (char *)malloc((size_t)(vlen * 4) / 3 + 10); + char * out = result; + while (vlen >= 3) { + *out++ = basis_64[value[0] >> 2]; + *out++ = basis_64[((value[0] << 4) & 0x30) | (value[1] >> 4)]; + *out++ = basis_64[((value[1] << 2) & 0x3C) | (value[2] >> 6)]; + *out++ = basis_64[value[2] & 0x3F]; + value += 3; + vlen -= 3; + } + if (vlen > 0) { + *out++ = basis_64[value[0] >> 2]; + oval = (value[0] << 4) & 0x30; + if (vlen > 1) oval |= value[1] >> 4; + *out++ = basis_64[oval]; + *out++ = (vlen < 2) ? '=' : basis_64[(value[1] << 2) & 0x3C]; + *out++ = '='; + } + *out = '\0'; + return result; +} + +// base64 decode +#define CHAR64(c) (((c) < 0 || (c) > 127) ? -1 : index_64[(c)]) +static signed char index_64[128] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1}; + +unsigned char *base64_decode(const char *value, int inlen, int *outlen) { + int c1, c2, c3, c4; + unsigned char *result = (unsigned char *)malloc((size_t)(inlen * 3) / 4 + 1); + unsigned char *out = result; + + *outlen = 0; + + while (1) { + if (value[0] == 0) { + *out = '\0'; + return result; + } + + // skip \r\n + if (value[0] == '\n' || value[0] == '\r') { + value += 1; + continue; + } + + c1 = value[0]; + if (CHAR64(c1) == -1) goto base64_decode_error; + c2 = value[1]; + if (CHAR64(c2) == -1) goto base64_decode_error; + c3 = value[2]; + if ((c3 != '=') && (CHAR64(c3) == -1)) goto base64_decode_error; + c4 = value[3]; + if ((c4 != '=') && (CHAR64(c4) == -1)) goto base64_decode_error; + + value += 4; + *out++ = (unsigned char)((CHAR64(c1) << 2) | (CHAR64(c2) >> 4)); + *outlen += 1; + if (c3 != '=') { + *out++ = (unsigned char)(((CHAR64(c2) << 4) & 0xf0) | (CHAR64(c3) >> 2)); + *outlen += 1; + if (c4 != '=') { + *out++ = (unsigned char)(((CHAR64(c3) << 6) & 0xc0) | CHAR64(c4)); + *outlen += 1; + } + } + } + +base64_decode_error: + free(result); + *result = 0; + *outlen = 0; + + return result; +} diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c new file mode 100644 index 000000000000..4396d9fda947 --- /dev/null +++ b/src/util/src/tcache.c @@ -0,0 +1,730 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "tcache.h" +#include "tlog.h" +#include "ttime.h" +#include "ttimer.h" +#include "tutil.h" + +#define HASH_VALUE_IN_TRASH (-1) + +/** + * todo: refactor to extract the hash table out of cache structure + */ +typedef struct SCacheStatis { + int64_t missCount; + int64_t hitCount; + int64_t totalAccess; + int64_t refreshCount; + int32_t numOfCollision; +} SCacheStatis; + +typedef struct _cache_node_t { + char * key; /* null-terminated string */ + struct _cache_node_t *prev; + struct _cache_node_t *next; + uint64_t time; + uint64_t signature; + + /* + * reference count for this object + * if this value is larger than 0, this value will never be released + */ + uint32_t refCount; + int32_t hashVal; /* the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash*/ + uint32_t nodeSize; /* allocated size for current SDataNode */ + char data[]; +} SDataNode; + +typedef int (*_hashFunc)(int, char *, uint32_t); + +typedef struct { + SDataNode **hashList; + int maxSessions; + int total; + + int64_t totalSize; /* total allocated buffer in this hash table, SCacheObj is not included. */ + int64_t refreshTime; + + /* + * to accommodate the old datanode which has the same key value of new one in hashList + * when an new node is put into cache, if an existed one with the same key: + * 1. if the old one does not be referenced, update it. + * 2. otherwise, move the old one to pTrash, add the new one. + * + * when the node in pTrash does not be referenced, it will be release at the expired time + */ + SDataNode *pTrash; + int numOfElemsInTrash; /* number of element in trash */ + + void *tmrCtrl; + void *pTimer; + + SCacheStatis statistics; + _hashFunc hashFp; + pthread_rwlock_t lock; +} SCacheObj; + +static FORCE_INLINE int32_t taosNormalHashTableLength(int32_t length) { + int32_t i = 4; + while (i < length) i = (i << 1); + return i; +} + +/** + * @param key key of object for hash, usually a null-terminated string + * @param keyLen length of key + * @param pData actually data. required a consecutive memory block, no pointer is allowed + * in pData. Pointer copy causes memory access error. 
+ * @param size size of block + * @param lifespan total survial time from now + * @return SDataNode + */ +static SDataNode *taosCreateHashNode(const char *key, uint32_t keyLen, const char *pData, size_t dataSize, + uint64_t lifespan) { + size_t totalSize = dataSize + sizeof(SDataNode) + keyLen; + + SDataNode *pNewNode = calloc(1, totalSize); + if (pNewNode == NULL) { + pError("failed to allocate memory, reason:%s", strerror(errno)); + return NULL; + } + + memcpy(pNewNode->data, pData, dataSize); + + pNewNode->time = taosGetTimestampMs() + lifespan; + + pNewNode->key = pNewNode->data + dataSize; + strcpy(pNewNode->key, key); + + pNewNode->signature = (uint64_t)pNewNode; + pNewNode->nodeSize = (uint32_t)totalSize; + + return pNewNode; +} + +/** + * hash key function + * @param pObj cache object + * @param key key string + * @param len length of key + * @return hash value + */ +static FORCE_INLINE int taosHashKey(int maxSessions, char *key, uint32_t len) { + uint32_t hash = MurmurHash3_32(key, len); + + /* avoid the costly remainder operation */ + assert((maxSessions & (maxSessions - 1)) == 0); + hash = hash & (maxSessions - 1); + + return hash; +} + +/** + * add object node into trash, and this object is closed for referencing if it is add to trash + * It will be removed until the pNode->refCount == 0 + * @param pObj Cache object + * @param pNode Cache slot object + */ +static void taosAddToTrash(SCacheObj *pObj, SDataNode *pNode) { + if (pNode->hashVal == HASH_VALUE_IN_TRASH) { + /* node is already in trash */ + return; + } + + pNode->next = pObj->pTrash; + if (pObj->pTrash) { + pObj->pTrash->prev = pNode; + } + + pNode->prev = NULL; + pObj->pTrash = pNode; + + pNode->hashVal = HASH_VALUE_IN_TRASH; + pObj->numOfElemsInTrash++; + + pTrace("key:%s %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pObj->numOfElemsInTrash); +} + +static void taosRemoveFromTrash(SCacheObj *pObj, SDataNode *pNode) { + if (pNode->signature != (uint64_t)pNode) { + pError("key:sig:%d %p data has been released, ignore", pNode->signature, pNode); + return; + } + + pObj->numOfElemsInTrash--; + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + /* pnode is the header, update header */ + pObj->pTrash = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + pNode->signature = 0; + free(pNode); +} +/** + * remove nodes in trash with refCount == 0 in cache + * @param pNode + * @param pObj + * @param force force model, if true, remove data in trash without check refcount. + * may cause corruption. So, forece model only applys before cache is closed + */ +static void taosClearCacheTrash(SCacheObj *pObj, _Bool force) { + pthread_rwlock_wrlock(&pObj->lock); + + if (pObj->numOfElemsInTrash == 0) { + if (pObj->pTrash != NULL) { + pError("key:inconsistency data in cache, numOfElem in trash:%d", pObj->numOfElemsInTrash); + } + pObj->pTrash = NULL; + pthread_rwlock_unlock(&pObj->lock); + return; + } + + SDataNode *pNode = pObj->pTrash; + + while (pNode) { + if (pNode->refCount < 0) { + pError("key:%s %p in trash released more than referenced, removed", pNode->key, pNode); + pNode->refCount = 0; + } + + if (pNode->next == pNode) { + pNode->next = NULL; + } + + if (force || (pNode->refCount == 0)) { + pTrace("key:%s %p removed from trash. 
numOfElem in trash:%d", pNode->key, pNode, pObj->numOfElemsInTrash - 1) + SDataNode *pTmp = pNode; + pNode = pNode->next; + taosRemoveFromTrash(pObj, pTmp); + } else { + pNode = pNode->next; + } + } + + assert(pObj->numOfElemsInTrash >= 0); + pthread_rwlock_unlock(&pObj->lock); +} + +/** + * add data node into cache + * @param pObj cache object + * @param pNode Cache slot object + */ +static void taosAddToHashTable(SCacheObj *pObj, SDataNode *pNode) { + assert(pNode->hashVal >= 0); + + pNode->next = pObj->hashList[pNode->hashVal]; + + if (pObj->hashList[pNode->hashVal] != 0) { + (pObj->hashList[pNode->hashVal])->prev = pNode; + pObj->statistics.numOfCollision++; + } + pObj->hashList[pNode->hashVal] = pNode; + + pObj->total++; + pObj->totalSize += pNode->nodeSize; + + pTrace("key:%s %p add to hash table", pNode->key, pNode); +} + +/** + * remove node in hash list + * @param pObj + * @param pNode + */ +static void taosRemoveNodeInHashTable(SCacheObj *pObj, SDataNode *pNode) { + if (pNode->hashVal == HASH_VALUE_IN_TRASH) return; + + SDataNode *pNext = pNode->next; + if (pNode->prev) { + pNode->prev->next = pNext; + } else { + /* the node is in hashlist, remove it */ + pObj->hashList[pNode->hashVal] = pNext; + } + + if (pNext) { + pNext->prev = pNode->prev; + } + + pObj->total--; + pObj->totalSize -= pNode->nodeSize; + + pNode->next = NULL; + pNode->prev = NULL; + + pTrace("key:%s %p remove from hashtable", pNode->key, pNode); +} + +/** + * in-place node in hashlist + * @param pObj cache object + * @param pNode data node + */ +static void taosUpdateInHashTable(SCacheObj *pObj, SDataNode *pNode) { + assert(pNode->hashVal >= 0); + + if (pNode->prev) { + pNode->prev->next = pNode; + } else { + pObj->hashList[pNode->hashVal] = pNode; + } + + if (pNode->next) { + (pNode->next)->prev = pNode; + } + + pTrace("key:%s %p update hashtable", pNode->key, pNode); +} + +/** + * get SDataNode from hashlist, nodes from trash are not included. 
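+ * The slot is chosen by hashing the key; collisions are resolved by
+ * walking the chain in that slot and comparing the full NUL-terminated
+ * key with strcmp.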
+ * @param pObj Cache objection + * @param key key for hash + * @param keyLen key length + * @return + */ +static SDataNode *taosGetNodeFromHashTable(SCacheObj *pObj, char *key, uint32_t keyLen) { + int hash = (*pObj->hashFp)(pObj->maxSessions, key, keyLen); + + SDataNode *pNode = pObj->hashList[hash]; + while (pNode) { + if (strcmp(pNode->key, key) == 0) break; + + pNode = pNode->next; + } + + if (pNode) { + assert(pNode->hashVal == hash); + } + + return pNode; +} + +/** + * release node + * @param pObj cache object + * @param pNode data node + */ +static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pObj, SDataNode *pNode) { + taosRemoveNodeInHashTable(pObj, pNode); + if (pNode->signature != (uint64_t)pNode) { + pError("key:%s, %p data is invalid, or has been released", pNode->key, pNode); + return; + } + + pTrace("key:%s is removed from cache,total:%d,size:%ldbytes", pNode->key, pObj->total, pObj->totalSize); + pNode->signature = 0; + free(pNode); +} + +/** + * move the old node into trash + * @param pObj + * @param pNode + */ +static FORCE_INLINE void taosCacheMoveNodeToTrash(SCacheObj *pObj, SDataNode *pNode) { + taosRemoveNodeInHashTable(pObj, pNode); + taosAddToTrash(pObj, pNode); +} + +/** + * update data in cache + * @param pObj + * @param pNode + * @param key + * @param keyLen + * @param pData + * @param dataSize + * @return + */ +static SDataNode *taosUpdateCacheImpl(SCacheObj *pObj, SDataNode *pNode, char *key, int32_t keyLen, void *pData, + uint32_t dataSize, uint64_t keepTime) { + SDataNode *pNewNode = NULL; + + /* only a node is not referenced by any other object, in-place update it */ + if (pNode->refCount == 0) { + size_t newSize = sizeof(SDataNode) + dataSize + keyLen; + + pNewNode = (SDataNode *)realloc(pNode, newSize); + if (pNewNode == NULL) { + return NULL; + } + + pNewNode->signature = (uint64_t)pNewNode; + memcpy(pNewNode->data, pData, dataSize); + + pNewNode->key = pNewNode->data + dataSize; + strcpy(pNewNode->key, key); + + __sync_add_and_fetch_32(&pNewNode->refCount, 1); + taosUpdateInHashTable(pObj, pNewNode); + } else { + int32_t hashVal = pNode->hashVal; + taosCacheMoveNodeToTrash(pObj, pNode); + + pNewNode = taosCreateHashNode(key, keyLen, pData, dataSize, keepTime); + if (pNewNode == NULL) { + return NULL; + } + + __sync_add_and_fetch_32(&pNewNode->refCount, 1); + + assert(hashVal == (*pObj->hashFp)(pObj->maxSessions, key, keyLen - 1)); + pNewNode->hashVal = hashVal; + + /* add new one to hashtable */ + taosAddToHashTable(pObj, pNewNode); + } + + return pNewNode; +} + +/** + * add data into hash table + * @param key + * @param pData + * @param size + * @param pObj + * @param keyLen + * @param pNode + * @return + */ +static FORCE_INLINE SDataNode *taosAddToCacheImpl(SCacheObj *pObj, char *key, uint32_t keyLen, const char *pData, + int dataSize, uint64_t lifespan) { + SDataNode *pNode = taosCreateHashNode(key, keyLen, pData, dataSize, lifespan); + if (pNode == NULL) { + return NULL; + } + + __sync_add_and_fetch_32(&pNode->refCount, 1); + pNode->hashVal = (*pObj->hashFp)(pObj->maxSessions, key, keyLen - 1); + taosAddToHashTable(pObj, pNode); + + return pNode; +} + +/** + * add data into cache + * + * @param handle cache object + * @param key key + * @param pData cached data + * @param dataSize data size + * @param keepTime survival time in second + * @return cached element + */ +void *taosAddDataIntoCache(void *handle, char *key, char *pData, int dataSize, int keepTime) { + SDataNode *pNode; + SCacheObj *pObj; + + pObj = (SCacheObj *)handle; + if (pObj == 
NULL || pObj->maxSessions == 0) return NULL; + + uint32_t keyLen = (uint32_t)strlen(key) + 1; + + pthread_rwlock_wrlock(&pObj->lock); + SDataNode *pOldNode = taosGetNodeFromHashTable(pObj, key, keyLen - 1); + + if (pOldNode == NULL) { // do add to cache + pNode = taosAddToCacheImpl(pObj, key, keyLen, pData, dataSize, keepTime * 1000L); + pTrace("key:%s %p added into cache,slot:%d,expireTime:%lld,cache total:%d,size:%ldbytes,collision:%d", pNode->key, + pNode, pNode->hashVal, pNode->time, pObj->total, pObj->totalSize, pObj->statistics.numOfCollision); + } else { // old data exists, update the node + pNode = taosUpdateCacheImpl(pObj, pOldNode, key, keyLen, pData, dataSize, keepTime * 1000L); + // pWarn("key:%s %p exist in cache,updated", key, pNode); + } + + pthread_rwlock_unlock(&pObj->lock); + return (pNode != NULL) ? pNode->data : NULL; +} + +/** + * remove data in cache, the data will not be removed immediately. + * if it is referenced by other object, it will be remain in cache + * @param handle + * @param data + */ +void taosRemoveDataFromCache(void *handle, void **data, _Bool isForce) { + SCacheObj *pObj = (SCacheObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0 || (*data) == NULL || (pObj->total + pObj->numOfElemsInTrash == 0)) return; + + size_t offset = offsetof(SDataNode, data); + SDataNode *pNode = (SDataNode *)((char *)(*data) - offset); + + if (pNode->signature != (uint64_t)pNode) { + pError("key: %p release invalid cache data", pNode); + return; + } + + if (pNode->refCount > 0) { + __sync_add_and_fetch_32(&pNode->refCount, -1); + pTrace("key:%s is released by app.refcnt:%d", pNode->key, pNode->refCount); + } else { + /* + * safety check. + * app may false releases cached object twice, to decrease the refcount more than acquired + */ + pError("key:%s is released by app more than referenced.refcnt:%d", pNode->key, pNode->refCount); + } + + *data = NULL; + + if (isForce) { + pthread_rwlock_wrlock(&pObj->lock); + taosCacheMoveNodeToTrash(pObj, pNode); + pthread_rwlock_unlock(&pObj->lock); + } +} + +/** + * get data from cache + * @param handle cache object + * @param key key + * @return cached data or NULL + */ +void *taosGetDataFromCache(void *handle, char *key) { + SCacheObj *pObj = (SCacheObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + uint32_t keyLen = (uint32_t)strlen(key); + + pthread_rwlock_rdlock(&pObj->lock); + SDataNode *ptNode = taosGetNodeFromHashTable(handle, key, keyLen); + pthread_rwlock_unlock(&pObj->lock); + + if (ptNode != NULL) { + __sync_add_and_fetch_32(&ptNode->refCount, 1); + __sync_add_and_fetch_32(&pObj->statistics.hitCount, 1); + + pTrace("key:%s is retrieved from cache,refcnt:%d", key, ptNode->refCount); + } else { + __sync_add_and_fetch_32(&pObj->statistics.missCount, 1); + pTrace("key:%s not in cache,retrieved failed", key); + } + + __sync_add_and_fetch_32(&pObj->statistics.totalAccess, 1); + return (ptNode != NULL) ? 
ptNode->data : NULL; +} + +/** + * update data in cache + * @param handle hash object handle(pointer) + * @param key key for hash + * @param pData actually data + * @param size length of data + * @return new referenced data + */ +void *taosUpdateDataFromCache(void *handle, char *key, char *pData, int size, int duration) { + SCacheObj *pObj = (SCacheObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return NULL; + + SDataNode *pNew = NULL; + + uint32_t keyLen = strlen(key) + 1; + + pthread_rwlock_wrlock(&pObj->lock); + + SDataNode *pNode = taosGetNodeFromHashTable(handle, key, keyLen - 1); + if (pNode == NULL) { // object has been released, do add operation + pNew = taosAddToCacheImpl(pObj, key, keyLen, pData, size, duration * 1000L); + pWarn("key:%s does not exist, update failed,do add to cache.total:%d,size:%ldbytes", key, pObj->total, + pObj->totalSize); + } else { + pNew = taosUpdateCacheImpl(pObj, pNode, key, keyLen, pData, size, duration * 1000L); + pTrace("key:%s updated.expireTime:%lld.refCnt:%d", key, pNode->time, pNode->refCount); + } + + pthread_rwlock_unlock(&pObj->lock); + + return (pNew != NULL) ? pNew->data : NULL; +} + +/** + * refresh cache to remove data in both hashlist and trash, if any nodes' refcount == 0, every pObj->refreshTime + * @param handle Cache object handle + */ +void taosRefreshDataCache(void *handle, void *tmrId) { + SDataNode *pNode, *pNext; + SCacheObj *pObj = (SCacheObj *)handle; + + if (pObj == NULL || (pObj->total == 0 && pObj->numOfElemsInTrash == 0)) { + taosTmrReset(taosRefreshDataCache, pObj->refreshTime, pObj, pObj->tmrCtrl, &pObj->pTimer); + return; + } + + uint64_t time = taosGetTimestampMs(); + uint32_t numOfCheck = 0; + pObj->statistics.refreshCount++; + + int32_t num = pObj->total; + + for (int hash = 0; hash < pObj->maxSessions; ++hash) { + pthread_rwlock_wrlock(&pObj->lock); + pNode = pObj->hashList[hash]; + + while (pNode) { + numOfCheck++; + pNext = pNode->next; + + if (pNode->time <= time && pNode->refCount <= 0) { + taosCacheReleaseNode(pObj, pNode); + } + pNode = pNext; + } + + /* all data have been checked, not need to iterate further */ + if (numOfCheck == num || pObj->total <= 0) { + pthread_rwlock_unlock(&pObj->lock); + break; + } + + pthread_rwlock_unlock(&pObj->lock); + } + + taosClearCacheTrash(pObj, false); + taosTmrReset(taosRefreshDataCache, pObj->refreshTime, pObj, pObj->tmrCtrl, &pObj->pTimer); +} + +/** + * + * @param handle + * @param tmrId + */ +void taosClearDataCache(void *handle) { + SDataNode *pNode, *pNext; + SCacheObj *pObj = (SCacheObj *)handle; + + for (int hash = 0; hash < pObj->maxSessions; ++hash) { + pthread_rwlock_wrlock(&pObj->lock); + pNode = pObj->hashList[hash]; + + while (pNode) { + pNext = pNode->next; + taosCacheMoveNodeToTrash(pObj, pNode); + pNode = pNext; + } + pthread_rwlock_unlock(&pObj->lock); + } + + taosClearCacheTrash(pObj, false); +} + +/** + * + * @param maxSessions maximum slots available for hash elements + * @param tmrCtrl timer ctrl + * @param refreshTime refresh operation interval time, the maximum survival time when one element is expired and + * not referenced by other objects + * @return + */ +void *taosInitDataCache(int maxSessions, void *tmrCtrl, int64_t refreshTime) { + if (tmrCtrl == NULL || refreshTime <= 0 || maxSessions <= 0) { + return NULL; + } + + SCacheObj *pObj = (SCacheObj *)calloc(1, sizeof(SCacheObj)); + if (pObj == NULL) { + pError("failed to allocate memory, reason:%s", strerror(errno)); + return NULL; + } + + pObj->maxSessions = 
taosNormalHashTableLength(maxSessions); + + pObj->hashFp = taosHashKey; + pObj->refreshTime = refreshTime * 1000; + + pObj->hashList = (SDataNode **)calloc(1, sizeof(SDataNode *) * pObj->maxSessions); + if (pObj->hashList == NULL) { + free(pObj); + pError("failed to allocate memory, reason:%s", strerror(errno)); + return NULL; + } + + pObj->tmrCtrl = tmrCtrl; + taosTmrReset(taosRefreshDataCache, pObj->refreshTime, pObj, pObj->tmrCtrl, &pObj->pTimer); + + if (pthread_rwlock_init(&pObj->lock, NULL) != 0) { + taosTmrStopA(&pObj->pTimer); + free(pObj->hashList); + free(pObj); + + pError("failed to init lock, reason:%s", strerror(errno)); + return NULL; + } + + return (void *)pObj; +} + +/** + * release all allocated memory and destroy the cache object + * + * @param handle + */ +void taosCleanUpDataCache(void *handle) { + SCacheObj *pObj; + SDataNode *pNode, *pNext; + + pObj = (SCacheObj *)handle; + if (pObj == NULL || pObj->maxSessions <= 0) { + pthread_rwlock_destroy(&pObj->lock); + free(pObj); + return; + } + + taosTmrStopA(&pObj->pTimer); + + pthread_rwlock_wrlock(&pObj->lock); + + if (pObj->hashList && pObj->total > 0) { + for (int i = 0; i < pObj->maxSessions; ++i) { + pNode = pObj->hashList[i]; + while (pNode) { + pNext = pNode->next; + free(pNode); + pNode = pNext; + } + } + + free(pObj->hashList); + } + + pthread_rwlock_unlock(&pObj->lock); + + taosClearCacheTrash(pObj, true); + + pthread_rwlock_destroy(&pObj->lock); + memset(pObj, 0, sizeof(SCacheObj)); + + free(pObj); +} diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c new file mode 100644 index 000000000000..1ec7cab1b3a9 --- /dev/null +++ b/src/util/src/tcompression.c @@ -0,0 +1,1123 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +/* README.md TAOS compression + * + * INTEGER Compression Algorithm: + * To compress integers (including char, short, int, int64_t), the difference + * between two integers is calculated at first. Then the difference is + * transformed to positive by zig-zag encoding method + * (https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba). Then the value + * is + * encoded using simple 8B method. For more information about simple 8B, + * refer to https://en.wikipedia.org/wiki/8b/10b_encoding. + * + * NOTE : For bigint, only 59 bits can be used, which means data from -(2**59) + * to (2**59)-1 + * are allowed. + * + * BOOLEAN Compression Algorithm: + * We provide two methods for compress boolean types. Because boolean types in + * C + * code are char bytes with 0 and 1 values only, only one bit can used to + * discrimenate + * the values. + * 1. The first method is using only 1 bit to represent the boolean value with + * 1 for + * true and 0 for false. Then the compression rate is 1/8. + * 2. The second method is using run length encoding (RLE) methods. This + * methos works + * better when there are a lot of consecutive true values or false values. + * + * STRING Compression Algorithm: + * We us LZ4 method to compress the string type. 
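+ *
+ * As a small worked example of the integer scheme above (the values are
+ * illustrative, not taken from the code): the sequence 100, 103, 101
+ * first becomes the deltas 100, 3, -2; zig-zag encoding maps these to
+ * 200, 6, 3 (n >= 0 -> 2n, n < 0 -> -2n-1); the resulting small
+ * non-negative integers are then packed into 64-bit words using the
+ * simple 8B selectors.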
+ * + * FLOAT Compression Algorithm: + * We use the same method with Akumuli to compress float and double types. The + * compression + * algorithm assumes the float/double values change slightly. So we take the + * XOR between two + * adjacent values. Then compare the number of leading zeros and trailing + * zeros. If the number + * of leading zeros are larger than the trailing zeros, then record the last + * serveral bytes + * of the XORed value with informations. If not, record the first + * corresponding bytes. + * + */ +#include +#include +#include +#include +#include +#include + +#include "lz4.h" +#include "tscompression.h" +#include "tsdb.h" +#include "ttypes.h" + +const int TEST_NUMBER = 1; +#define is_bigendian() ((*(char *)&TEST_NUMBER) == 0) +#define SIMPLE8B_MAX_INT64 ((uint64_t)2305843009213693951L) + +// Function declarations +int tsCompressINTImp(const char *const input, const int nelements, char *const output, const char type); +int tsDecompressINTImp(const char *const input, const int nelements, char *const output, const char type); +int tsCompressBoolImp(const char *const input, const int nelements, char *const output); +int tsDecompressBoolImp(const char *const input, const int nelements, char *const output); +int tsCompressStringImp(const char *const input, int inputSize, char *const output, int outputSize); +int tsDecompressStringImp(const char *const input, int compressedSize, char *const output, int outputSize); +int tsCompressTimestampImp(const char *const input, const int nelements, char *const output); +int tsDecompressTimestampImp(const char *const input, const int nelements, char *const output); +int tsCompressDoubleImp(const char *const input, const int nelements, char *const output); +int tsDecompressDoubleImp(const char *const input, const int nelements, char *const output); +int tsCompressFloatImp(const char *const input, const int nelements, char *const output); +int tsDecompressFloatImp(const char *const input, const int nelements, char *const output); + +/* ----------------------------------------------Compression function used by + * others ---------------------------------------------- */ +int tsCompressTinyint(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, char algorithm, + char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressINTImp(input, nelements, output, TSDB_DATA_TYPE_TINYINT); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressINTImp(input, nelements, buffer, TSDB_DATA_TYPE_TINYINT); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressTinyint(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_TINYINT); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, buffer, bufferSize); + return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_TINYINT); + } else { + assert(0); + } +} + +int tsCompressSmallint(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, char algorithm, + char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressINTImp(input, nelements, output, TSDB_DATA_TYPE_SMALLINT); + } else if (algorithm == TWO_STAGE_COMP) { + int len = 
tsCompressINTImp(input, nelements, buffer, TSDB_DATA_TYPE_SMALLINT); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressSmallint(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_SMALLINT); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, buffer, bufferSize); + return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_SMALLINT); + } else { + assert(0); + } +} + +int tsCompressInt(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, char algorithm, + char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressINTImp(input, nelements, output, TSDB_DATA_TYPE_INT); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressINTImp(input, nelements, buffer, TSDB_DATA_TYPE_INT); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressInt(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_INT); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, buffer, bufferSize); + return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_INT); + } else { + assert(0); + } +} + +int tsCompressBigint(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressINTImp(input, nelements, output, TSDB_DATA_TYPE_BIGINT); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressINTImp(input, nelements, buffer, TSDB_DATA_TYPE_BIGINT); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressBigint(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressINTImp(input, nelements, output, TSDB_DATA_TYPE_BIGINT); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, buffer, bufferSize); + return tsDecompressINTImp(buffer, nelements, output, TSDB_DATA_TYPE_BIGINT); + } else { + assert(0); + } +} + +int tsCompressBool(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressBoolImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressBoolImp(input, nelements, buffer); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressBool(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressBoolImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, 
buffer, bufferSize); + return tsDecompressBoolImp(buffer, nelements, output); + } else { + assert(0); + } +} + +int tsCompressString(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) { + return tsCompressStringImp(input, inputSize, output, outputSize); +} + +int tsDecompressString(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + return tsDecompressStringImp(input, compressedSize, output, outputSize); +} + +int tsCompressFloat(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressFloatImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressFloatImp(input, nelements, buffer); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressFloat(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressFloatImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, buffer, bufferSize); + return tsDecompressFloatImp(buffer, nelements, output); + } else { + assert(0); + } +} +int tsCompressDouble(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressDoubleImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressDoubleImp(input, nelements, buffer); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressDouble(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressDoubleImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, buffer, bufferSize); + return tsDecompressDoubleImp(buffer, nelements, output); + } else { + assert(0); + } +} + +int tsCompressTimestamp(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsCompressTimestampImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressTimestampImp(input, nelements, buffer); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + } +} + +int tsDecompressTimestamp(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize) { + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressTimestampImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + tsDecompressStringImp(input, compressedSize, buffer, bufferSize); + return tsDecompressTimestampImp(buffer, nelements, output); + } else { + assert(0); + } +} + +bool safeInt64Add(int64_t a, int64_t b) { + if ((a > 0 && b > INT64_MAX - a) 
|| (a < 0 && b < INT64_MIN - a)) return false; + return true; +} + +/* + * Compress Integer (Simple8B). + */ +int tsCompressINTImp(const char *const input, const int nelements, char *const output, const char type) { + // Selector value: 0 1 2 3 4 5 6 7 8 9 10 11 + // 12 13 14 15 + char bit_per_integer[] = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60}; + int selector_to_elems[] = {240, 120, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1}; + char bit_to_selector[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}; + + // get the byte limit. + int word_length = 0; + switch (type) { + case TSDB_DATA_TYPE_BIGINT: + word_length = LONG_BYTES; + break; + case TSDB_DATA_TYPE_INT: + word_length = INT_BYTES; + break; + case TSDB_DATA_TYPE_SMALLINT: + word_length = SHORT_BYTES; + break; + case TSDB_DATA_TYPE_TINYINT: + word_length = CHAR_BYTES; + break; + default: + perror("Wrong integer types.\n"); + exit(1); + } + + int byte_limit = nelements * word_length + 1; + int opos = 1; + int64_t prev_value = 0; + + for (int i = 0; i < nelements;) { + char selector = 0; + char bit = 0; + int elems = 0; + int64_t prev_value_tmp = prev_value; + + for (int j = i; j < nelements; j++) { + // Read data from the input stream and convert it to INT64 type. + int64_t curr_value = 0; + switch (type) { + case TSDB_DATA_TYPE_TINYINT: + curr_value = (int64_t)(*((int8_t *)input + j)); + break; + case TSDB_DATA_TYPE_SMALLINT: + curr_value = (int64_t)(*((int16_t *)input + j)); + break; + case TSDB_DATA_TYPE_INT: + curr_value = (int64_t)(*((int32_t *)input + j)); + break; + case TSDB_DATA_TYPE_BIGINT: + curr_value = (int64_t)(*((int64_t *)input + j)); + break; + } + // Get difference. + if (!safeInt64Add(curr_value, -prev_value)) goto _copy_and_exit; + + int64_t diff = curr_value - prev_value_tmp; + // Zigzag encode the value. + uint64_t zigzag_value = (diff >> (LONG_BYTES * BITS_PER_BYTE - 1)) ^ (diff << 1); + + if (zigzag_value >= SIMPLE8B_MAX_INT64) goto _copy_and_exit; + + char tmp_bit; + if (zigzag_value == 0) { + // Take care here, __builtin_clzl give wrong anser for value 0; + tmp_bit = 0; + } else { + tmp_bit = (LONG_BYTES * BITS_PER_BYTE) - __builtin_clzl(zigzag_value); + } + + if (elems + 1 <= selector_to_elems[selector] && elems + 1 <= selector_to_elems[bit_to_selector[tmp_bit]]) { + // If can hold another one. + selector = selector > bit_to_selector[tmp_bit] ? selector : bit_to_selector[tmp_bit]; + elems++; + bit = bit_per_integer[selector]; + } else { + // if cannot hold another one. 
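+        // Widen the selector (more bits per value, fewer slots per word)
+        // until its capacity no longer exceeds the number of values
+        // gathered so far, then flush a full 64-bit word holding exactly
+        // that many values; the remaining values start a new word.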
+ while (elems < selector_to_elems[selector]) selector++; + elems = selector_to_elems[selector]; + bit = bit_per_integer[selector]; + break; + } + prev_value_tmp = curr_value; + } + + uint64_t buffer = 0; + buffer |= (uint64_t)selector; + for (int k = 0; k < elems; k++) { + int64_t curr_value = 0; /* get current values */ + switch (type) { + case TSDB_DATA_TYPE_TINYINT: + curr_value = (int64_t)(*((int8_t *)input + i)); + break; + case TSDB_DATA_TYPE_SMALLINT: + curr_value = (int64_t)(*((int16_t *)input + i)); + break; + case TSDB_DATA_TYPE_INT: + curr_value = (int64_t)(*((int32_t *)input + i)); + break; + case TSDB_DATA_TYPE_BIGINT: + curr_value = (int64_t)(*((int64_t *)input + i)); + break; + } + int64_t diff = curr_value - prev_value; + uint64_t zigzag_value = (diff >> (LONG_BYTES * BITS_PER_BYTE - 1)) ^ (diff << 1); + buffer |= ((zigzag_value & INT64MASK(bit)) << (bit * k + 4)); + i++; + prev_value = curr_value; + } + + // Output the encoded value to the output. + if (opos + sizeof(buffer) <= byte_limit) { + memcpy(output + opos, &buffer, sizeof(buffer)); + opos += sizeof(buffer); + } else { + _copy_and_exit: + output[0] = 1; + memcpy(output + 1, input, byte_limit - 1); + return byte_limit; + } + } + + // set the indicator. + output[0] = 0; + return opos; +} + +int tsDecompressINTImp(const char *const input, const int nelements, char *const output, const char type) { + int word_length = 0; + switch (type) { + case TSDB_DATA_TYPE_BIGINT: + word_length = LONG_BYTES; + break; + case TSDB_DATA_TYPE_INT: + word_length = INT_BYTES; + break; + case TSDB_DATA_TYPE_SMALLINT: + word_length = SHORT_BYTES; + break; + case TSDB_DATA_TYPE_TINYINT: + word_length = CHAR_BYTES; + break; + default: + perror("Wrong integer types.\n"); + exit(1); + } + + // If not compressed. 
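+  // The first byte is the indicator written by tsCompressINTImp:
+  // 1 means the raw values were copied verbatim (packing was abandoned),
+  // 0 means the payload is a stream of simple-8B encoded 64-bit words.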
+ if (input[0] == 1) { + memcpy(output, input + 1, nelements * word_length); + return nelements * word_length; + } + + // Selector value: 0 1 2 3 4 5 6 7 8 9 10 11 + // 12 13 14 15 + char bit_per_integer[] = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60}; + int selector_to_elems[] = {240, 120, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1}; + + const char *ip = input + 1; + int count = 0; + int _pos = 0; + int64_t prev_value = 0; + + while (1) { + if (count == nelements) break; + + uint64_t w = 0; + memcpy(&w, ip, LONG_BYTES); + + char selector = (char)(w & INT64MASK(4)); // selector = 4 + char bit = bit_per_integer[selector]; // bit = 3 + int elems = selector_to_elems[selector]; + + for (int i = 0; i < elems; i++) { + uint64_t zigzag_value; + + if (selector == 0 || selector == 1) { + zigzag_value = 0; + } else { + zigzag_value = ((w >> (4 + bit * i)) & INT64MASK(bit)); + } + int64_t diff = (zigzag_value >> 1) ^ -(zigzag_value & 1); + int64_t curr_value = diff + prev_value; + prev_value = curr_value; + + switch (type) { + case TSDB_DATA_TYPE_BIGINT: + *((int64_t *)output + _pos) = curr_value; + _pos++; + break; + case TSDB_DATA_TYPE_INT: + *((int32_t *)output + _pos) = curr_value; + _pos++; + break; + case TSDB_DATA_TYPE_SMALLINT: + *((int16_t *)output + _pos) = curr_value; + _pos++; + break; + case TSDB_DATA_TYPE_TINYINT: + *((int8_t *)output + _pos) = curr_value; + _pos++; + break; + default: + perror("Wrong integer types.\n"); + exit(1); + } + count++; + if (count == nelements) break; + } + ip += LONG_BYTES; + } + + return nelements * word_length; +} + +/* ----------------------------------------------Bool Compression + * ---------------------------------------------- */ +// TODO: You can also implement it using RLE method. +int tsCompressBoolImp(const char *const input, const int nelements, char *const output) { + int pos = -1; + int ele_per_byte = BITS_PER_BYTE / 2; + + for (int i = 0; i < nelements; i++) { + if (i % ele_per_byte == 0) { + pos++; + output[pos] = 0; + } + + uint8_t t = 0; + if (input[i] == 1) { + t = (((uint8_t)1) << (2 * (i % ele_per_byte))); + output[pos] |= t; + } else if (input[i] == 0) { + t = ((uint8_t)1 << (2 * (i % ele_per_byte))) - 1; + /* t = (~((( uint8_t)1) << (7-i%BITS_PER_BYTE))); */ + output[pos] &= t; + } else if (input[i] == TSDB_DATA_BOOL_NULL) { + t = ((uint8_t)2 << (2 * (i % ele_per_byte))); + /* t = (~((( uint8_t)1) << (7-i%BITS_PER_BYTE))); */ + output[pos] |= t; + } else { + perror("Wrong bool value.\n"); + exit(1); + } + } + + return pos + 1; +} + +int tsDecompressBoolImp(const char *const input, const int nelements, char *const output) { + int ipos = -1, opos = 0; + int ele_per_byte = BITS_PER_BYTE / 2; + + for (int i = 0; i < nelements; i++) { + if (i % ele_per_byte == 0) { + ipos++; + } + + uint8_t ele = (input[ipos] >> (2 * (i % ele_per_byte))) & INT8MASK(2); + if (ele == 1) { + output[opos++] = 1; + } else if (ele == 2) { + output[opos++] = TSDB_DATA_BOOL_NULL; + } else { + output[opos++] = 0; + } + } + + return nelements; +} + +/* Run Length Encoding(RLE) Method */ +int tsCompressBoolRLEImp(const char *const input, const int nelements, char *const output) { + int _pos = 0; + + for (int i = 0; i < nelements;) { + unsigned char counter = 1; + char num = input[i]; + + for (++i; i < nelements; i++) { + if (input[i] == num) { + counter++; + if (counter == INT8MASK(7)) { + i++; + break; + } + } else { + break; + } + } + + // Encode the data. 
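+    // One byte per run: bit 0 stores the boolean value, bits 1..7 store
+    // the run length (the loop above caps a run at 127 repeats).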
+ if (num == 1) { + output[_pos++] = INT8MASK(1) | (counter << 1); + } else if (num == 0) { + output[_pos++] = (counter << 1) | INT8MASK(0); + } else { + perror("Wrong bool value!\n"); + exit(1); + } + } + + return _pos; +} + +int tsDecompressBoolRLEImp(const char *const input, const int nelements, char *const output) { + int ipos = 0, opos = 0; + while (1) { + char encode = input[ipos++]; + unsigned counter = (encode >> 1) & INT8MASK(7); + char value = encode & INT8MASK(1); + + memset(output + opos, value, counter); + opos += counter; + if (opos >= nelements) { + return nelements; + } + } +} + +/* ----------------------------------------------String Compression + * ---------------------------------------------- */ +// Note: the size of the output must be larger than input_size + 1 and +// LZ4_compressBound(size) + 1; +// >= max(input_size, LZ4_compressBound(input_size)) + 1; +int tsCompressStringImp(const char *const input, int inputSize, char *const output, int outputSize) { + // Try to compress using LZ4 algorithm. + const int compressed_data_size = LZ4_compress_default(input, output + 1, inputSize, outputSize-1); + + // If cannot compress or after compression, data becomes larger. + if (compressed_data_size <= 0 || compressed_data_size > inputSize) { + /* First byte is for indicator */ + output[0] = 0; + memcpy(output + 1, input, inputSize); + return inputSize + 1; + } + + output[0] = 1; + return compressed_data_size + 1; +} + +int tsDecompressStringImp(const char *const input, int compressedSize, char *const output, int outputSize) { + // compressedSize is the size of data after compression. + if (input[0] == 1) { + /* It is compressed by LZ4 algorithm */ + const int decompressed_size = LZ4_decompress_safe(input + 1, output, compressedSize - 1, outputSize); + if (decompressed_size < 0) { + char msg[128] = {0}; + sprintf(msg, "decomp_size:%d, Error decompress in LZ4 algorithm!\n", decompressed_size); + perror(msg); + exit(EXIT_FAILURE); + } + + return decompressed_size; + } else if (input[0] == 0) { + /* It is not compressed by LZ4 algorithm */ + memcpy(output, input + 1, compressedSize - 1); + return compressedSize - 1; + } else { + perror("Wrong compressed string indicator!\n"); + exit(EXIT_FAILURE); + } +} + +/* --------------------------------------------Timestamp Compression + * ---------------------------------------------- */ +// TODO: Take care here, we assumes little endian encoding. +int tsCompressTimestampImp(const char *const input, const int nelements, char *const output) { + int _pos = 1; + assert(nelements >= 0); + + if (nelements == 0) return 0; + + int64_t *istream = (int64_t *)input; + + int64_t prev_value = istream[0]; + int64_t prev_delta = -prev_value; + uint8_t flags = 0, flag1 = 0, flag2 = 0; + uint64_t dd1 = 0, dd2 = 0; + + for (int i = 0; i < nelements; i++) { + int64_t curr_value = istream[i]; + if (!safeInt64Add(curr_value, -prev_value)) goto _exit_over; + int64_t curr_delta = curr_value - prev_value; + if (!safeInt64Add(curr_delta, -prev_delta)) goto _exit_over; + int64_t delta_of_delta = curr_delta - prev_delta; + // zigzag encode the value. 
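+    // For near-evenly spaced timestamps the delta-of-delta is close to
+    // zero; zig-zag folds it into a small unsigned value
+    // (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3) so that only its low-order
+    // non-zero bytes need to be written out below.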
+ uint64_t zigzag_value = (delta_of_delta >> (LONG_BYTES * BITS_PER_BYTE - 1)) ^ (delta_of_delta << 1); + if (i % 2 == 0) { + flags = 0; + dd1 = zigzag_value; + if (dd1 == 0) { + flag1 = 0; + } else { + flag1 = LONG_BYTES - __builtin_clzl(dd1) / BITS_PER_BYTE; + } + } else { + dd2 = zigzag_value; + if (dd2 == 0) { + flag2 = 0; + } else { + flag2 = LONG_BYTES - __builtin_clzl(dd2) / BITS_PER_BYTE; + } + flags = flag1 | (flag2 << 4); + // Encode the flag. + if ((_pos + CHAR_BYTES - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, &flags, CHAR_BYTES); + _pos += CHAR_BYTES; + /* Here, we assume it is little endian encoding method. */ + // Encode dd1 + if (is_bigendian()) { + if ((_pos + flag1 - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, (char *)(&dd1) + LONG_BYTES - flag1, flag1); + } else { + if ((_pos + flag1 - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, (char *)(&dd1), flag1); + } + _pos += flag1; + // Encode dd2; + if (is_bigendian()) { + if ((_pos + flag2 - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, (char *)(&dd2) + LONG_BYTES - flag2, flag2); + } else { + if ((_pos + flag2 - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, (char *)(&dd2), flag2); + } + _pos += flag2; + } + prev_value = curr_value; + prev_delta = curr_delta; + } + + if (nelements % 2 == 1) { + flag2 = 0; + flags = flag1 | (flag2 << 4); + // Encode the flag. + if ((_pos + CHAR_BYTES - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, &flags, CHAR_BYTES); + _pos += CHAR_BYTES; + // Encode dd1; + if (is_bigendian()) { + if ((_pos + flag1 - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, (char *)(&dd1) + LONG_BYTES - flag1, flag1); + } else { + if ((_pos + flag1 - 1) >= nelements * LONG_BYTES) goto _exit_over; + memcpy(output + _pos, (char *)(&dd1), flag1); + } + _pos += flag1; + } + + output[0] = 1; // Means the string is compressed + return _pos; + +_exit_over: + output[0] = 0; // Means the string is not compressed + memcpy(output + 1, input, nelements * LONG_BYTES); + return nelements * LONG_BYTES + 1; +} + +int tsDecompressTimestampImp(const char *const input, const int nelements, char *const output) { + assert(nelements >= 0); + if (nelements == 0) return 0; + + if (input[0] == 0) { + memcpy(output, input + 1, nelements * LONG_BYTES); + return nelements * LONG_BYTES; + } else if (input[0] == 1) { // Decompress + int64_t *ostream = (int64_t *)output; + + int ipos = 1, opos = 0; + int8_t nbytes = 0; + int64_t prev_value = 0; + int64_t prev_delta = 0; + int64_t delta_of_delta = 0; + + while (1) { + uint8_t flags = input[ipos++]; + // Decode dd1 + uint64_t dd1 = 0; + nbytes = flags & INT8MASK(4); + if (nbytes == 0) { + delta_of_delta = 0; + } else { + if (is_bigendian()) { + memcpy(&dd1 + LONG_BYTES - nbytes, input + ipos, nbytes); + } else { + memcpy(&dd1, input + ipos, nbytes); + } + delta_of_delta = (dd1 >> 1) ^ -(dd1 & 1); + } + ipos += nbytes; + if (opos == 0) { + prev_value = delta_of_delta; + prev_delta = 0; + ostream[opos++] = delta_of_delta; + } else { + prev_delta = delta_of_delta + prev_delta; + prev_value = prev_value + prev_delta; + ostream[opos++] = prev_value; + } + if (opos == nelements) return nelements * LONG_BYTES; + + // Decode dd2 + uint64_t dd2 = 0; + nbytes = (flags >> 4) & INT8MASK(4); + if (nbytes == 0) { + delta_of_delta = 0; + } else { + if (is_bigendian()) { + memcpy(&dd2 + LONG_BYTES - nbytes, input + ipos, nbytes); + } else { + 
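+          // Little-endian host: the low-order nbytes of the zig-zag value
+          // were written first by the compressor, so they can be copied
+          // directly into dd2.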
memcpy(&dd2, input + ipos, nbytes); + } + // zigzag_decoding + delta_of_delta = (dd2 >> 1) ^ -(dd2 & 1); + } + ipos += nbytes; + prev_delta = delta_of_delta + prev_delta; + prev_value = prev_value + prev_delta; + ostream[opos++] = prev_value; + if (opos == nelements) return nelements * LONG_BYTES; + } + + } else { + assert(0); + } +} +/* --------------------------------------------Double Compression + * ---------------------------------------------- */ +void encodeDoubleValue(uint64_t diff, uint8_t flag, char *const output, int *const pos) { + uint8_t nbytes = (flag & INT8MASK(3)) + 1; + int nshift = (LONG_BYTES * BITS_PER_BYTE - nbytes * BITS_PER_BYTE) * (flag >> 3); + diff >>= nshift; + + while (nbytes) { + output[(*pos)++] = (int8_t)(diff & INT64MASK(8)); + diff >>= BITS_PER_BYTE; + nbytes--; + } +} + +int tsCompressDoubleImp(const char *const input, const int nelements, char *const output) { + int byte_limit = nelements * DOUBLE_BYTES + 1; + int opos = 1; + + uint64_t prev_value = 0; + uint64_t prev_diff = 0; + uint8_t prev_flag = 0; + + double *istream = (double *)input; + + // Main loop + for (int i = 0; i < nelements; i++) { + union { + double real; + uint64_t bits; + } curr; + + curr.real = istream[i]; + + // Here we assume the next value is the same as previous one. + uint64_t predicted = prev_value; + uint64_t diff = curr.bits ^ predicted; + + int leading_zeros = LONG_BYTES * BITS_PER_BYTE; + int trailing_zeros = leading_zeros; + + if (diff) { + trailing_zeros = __builtin_ctzl(diff); + leading_zeros = __builtin_clzl(diff); + } + + uint8_t nbytes = 0; + uint8_t flag; + + if (trailing_zeros > leading_zeros) { + nbytes = LONG_BYTES - trailing_zeros / BITS_PER_BYTE; + + if (nbytes > 0) nbytes--; + flag = ((uint8_t)1 << 3) | nbytes; + } else { + nbytes = LONG_BYTES - leading_zeros / BITS_PER_BYTE; + if (nbytes > 0) nbytes--; + flag = nbytes; + } + + if (i % 2 == 0) { + prev_diff = diff; + prev_flag = flag; + } else { + int nbyte1 = (prev_flag & INT8MASK(3)) + 1; + int nbyte2 = (flag & INT8MASK(3)) + 1; + if (opos + 1 + nbyte1 + nbyte2 <= byte_limit) { + uint8_t flags = prev_flag | (flag << 4); + output[opos++] = flags; + encodeDoubleValue(prev_diff, prev_flag, output, &opos); + encodeDoubleValue(diff, flag, output, &opos); + } else { + output[0] = 1; + memcpy(output + 1, input, byte_limit - 1); + return byte_limit; + } + } + prev_value = curr.bits; + } + + if (nelements % 2) { + int nbyte1 = (prev_flag & INT8MASK(3)) + 1; + int nbyte2 = 1; + if (opos + 1 + nbyte1 + nbyte2 <= byte_limit) { + uint8_t flags = prev_flag; + output[opos++] = flags; + encodeDoubleValue(prev_diff, prev_flag, output, &opos); + encodeDoubleValue(0ul, 0, output, &opos); + } else { + output[0] = 1; + memcpy(output + 1, input, byte_limit - 1); + return byte_limit; + } + } + + output[0] = 0; + return opos; +} + +uint64_t decodeDoubleValue(const char *const input, int *const ipos, uint8_t flag) { + uint64_t diff = 0ul; + int nbytes = (flag & INT8MASK(3)) + 1; + for (int i = 0; i < nbytes; i++) { + diff = diff | ((INT64MASK(8) & input[(*ipos)++]) << BITS_PER_BYTE * i); + } + int shift_width = (LONG_BYTES * BITS_PER_BYTE - nbytes * BITS_PER_BYTE) * (flag >> 3); + diff <<= shift_width; + + return diff; +} + +int tsDecompressDoubleImp(const char *const input, const int nelements, char *const output) { + // output stream + double *ostream = (double *)output; + + if (input[0] == 1) { + memcpy(output, input + 1, nelements * DOUBLE_BYTES); + return nelements * DOUBLE_BYTES; + } + + uint8_t flags = 0; + int ipos = 1; + int 
opos = 0; + uint64_t prev_value = 0; + + for (int i = 0; i < nelements; i++) { + if (i % 2 == 0) { + flags = input[ipos++]; + } + + uint8_t flag = flags & INT8MASK(4); + flags >>= 4; + + uint64_t diff = decodeDoubleValue(input, &ipos, flag); + union { + uint64_t bits; + double real; + } curr; + + uint64_t predicted = prev_value; + curr.bits = predicted ^ diff; + prev_value = curr.bits; + + ostream[opos++] = curr.real; + } + + return nelements * DOUBLE_BYTES; +} + +/* --------------------------------------------Float Compression + * ---------------------------------------------- */ +void encodeFloatValue(uint32_t diff, uint8_t flag, char *const output, int *const pos) { + uint8_t nbytes = (flag & INT8MASK(3)) + 1; + int nshift = (FLOAT_BYTES * BITS_PER_BYTE - nbytes * BITS_PER_BYTE) * (flag >> 3); + diff >>= nshift; + + while (nbytes) { + output[(*pos)++] = (int8_t)(diff & INT32MASK(8)); + diff >>= BITS_PER_BYTE; + nbytes--; + } +} + +int tsCompressFloatImp(const char *const input, const int nelements, char *const output) { + float *istream = (float *)input; + int byte_limit = nelements * FLOAT_BYTES + 1; + int opos = 1; + + uint32_t prev_value = 0; + uint32_t prev_diff = 0; + uint8_t prev_flag = 0; + + // Main loop + for (int i = 0; i < nelements; i++) { + union { + float real; + uint32_t bits; + } curr; + + curr.real = istream[i]; + + // Here we assume the next value is the same as previous one. + uint32_t predicted = prev_value; + uint32_t diff = curr.bits ^ predicted; + + int leading_zeros = FLOAT_BYTES * BITS_PER_BYTE; + int trailing_zeros = leading_zeros; + + if (diff) { + trailing_zeros = __builtin_ctz(diff); + leading_zeros = __builtin_clz(diff); + } + + uint8_t nbytes = 0; + uint8_t flag; + + if (trailing_zeros > leading_zeros) { + nbytes = FLOAT_BYTES - trailing_zeros / BITS_PER_BYTE; + + if (nbytes > 0) nbytes--; + flag = ((uint8_t)1 << 3) | nbytes; + } else { + nbytes = FLOAT_BYTES - leading_zeros / BITS_PER_BYTE; + if (nbytes > 0) nbytes--; + flag = nbytes; + } + + if (i % 2 == 0) { + prev_diff = diff; + prev_flag = flag; + } else { + int nbyte1 = (prev_flag & INT8MASK(3)) + 1; + int nbyte2 = (flag & INT8MASK(3)) + 1; + if (opos + 1 + nbyte1 + nbyte2 <= byte_limit) { + uint8_t flags = prev_flag | (flag << 4); + output[opos++] = flags; + encodeFloatValue(prev_diff, prev_flag, output, &opos); + encodeFloatValue(diff, flag, output, &opos); + } else { + output[0] = 1; + memcpy(output + 1, input, byte_limit - 1); + return byte_limit; + } + } + prev_value = curr.bits; + } + + if (nelements % 2) { + int nbyte1 = (prev_flag & INT8MASK(3)) + 1; + int nbyte2 = 1; + if (opos + 1 + nbyte1 + nbyte2 <= byte_limit) { + uint8_t flags = prev_flag; + output[opos++] = flags; + encodeFloatValue(prev_diff, prev_flag, output, &opos); + encodeFloatValue(0, 0, output, &opos); + } else { + output[0] = 1; + memcpy(output + 1, input, byte_limit - 1); + return byte_limit; + } + } + + output[0] = 0; + return opos; +} + +uint32_t decodeFloatValue(const char *const input, int *const ipos, uint8_t flag) { + uint32_t diff = 0ul; + int nbytes = (flag & INT8MASK(3)) + 1; + for (int i = 0; i < nbytes; i++) { + diff = diff | ((INT32MASK(8) & input[(*ipos)++]) << BITS_PER_BYTE * i); + } + int shift_width = (FLOAT_BYTES * BITS_PER_BYTE - nbytes * BITS_PER_BYTE) * (flag >> 3); + diff <<= shift_width; + + return diff; +} + +int tsDecompressFloatImp(const char *const input, const int nelements, char *const output) { + float *ostream = (float *)output; + + if (input[0] == 1) { + memcpy(output, input + 1, nelements * 
FLOAT_BYTES); + return nelements * FLOAT_BYTES; + } + + uint8_t flags = 0; + int ipos = 1; + int opos = 0; + uint32_t prev_value = 0; + + for (int i = 0; i < nelements; i++) { + if (i % 2 == 0) { + flags = input[ipos++]; + } + + uint8_t flag = flags & INT8MASK(4); + flags >>= 4; + + uint32_t diff = decodeFloatValue(input, &ipos, flag); + union { + uint32_t bits; + float real; + } curr; + + uint32_t predicted = prev_value; + curr.bits = predicted ^ diff; + prev_value = curr.bits; + + ostream[opos++] = curr.real; + } + + return nelements * FLOAT_BYTES; +} diff --git a/src/util/src/tcrc32c.c b/src/util/src/tcrc32c.c new file mode 100644 index 000000000000..d071f407d28b --- /dev/null +++ b/src/util/src/tcrc32c.c @@ -0,0 +1,1380 @@ +/* + Copyright (c) 2013 - 2014, 2016 Mark Adler, Robert Vazan, Max Vysokikh + + This software is provided 'as-is', without any express or implied + warranty. In no event will the author be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + */ +#include +#include +#include + +#include "tcrc32c.h" +//todo : use the original source code + +#define POLY 0x82f63b78 +#define LONG_SHIFT 8192 +#define SHORT_SHIFT 256 + +static uint32_t table[16][256] = { + {0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, + 0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, + 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c, + 0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc, + 0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, + 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512, + 0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad, + 0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, + 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf, + 0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f, + 0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, + 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f, + 0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e, + 0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, + 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e, + 0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de, + 0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, + 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 
0xfa44e0b4, + 0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b, + 0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, + 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5, + 0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975, + 0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, + 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905, + 0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8, + 0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, + 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8, + 0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78, + 0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, + 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6, + 0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69, + 0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, + 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351}, + + {0x00000000, 0x13a29877, 0x274530ee, 0x34e7a899, 0x4e8a61dc, 0x5d28f9ab, + 0x69cf5132, 0x7a6dc945, 0x9d14c3b8, 0x8eb65bcf, 0xba51f356, 0xa9f36b21, + 0xd39ea264, 0xc03c3a13, 0xf4db928a, 0xe7790afd, 0x3fc5f181, 0x2c6769f6, + 0x1880c16f, 0x0b225918, 0x714f905d, 0x62ed082a, 0x560aa0b3, 0x45a838c4, + 0xa2d13239, 0xb173aa4e, 0x859402d7, 0x96369aa0, 0xec5b53e5, 0xfff9cb92, + 0xcb1e630b, 0xd8bcfb7c, 0x7f8be302, 0x6c297b75, 0x58ced3ec, 0x4b6c4b9b, + 0x310182de, 0x22a31aa9, 0x1644b230, 0x05e62a47, 0xe29f20ba, 0xf13db8cd, + 0xc5da1054, 0xd6788823, 0xac154166, 0xbfb7d911, 0x8b507188, 0x98f2e9ff, + 0x404e1283, 0x53ec8af4, 0x670b226d, 0x74a9ba1a, 0x0ec4735f, 0x1d66eb28, + 0x298143b1, 0x3a23dbc6, 0xdd5ad13b, 0xcef8494c, 0xfa1fe1d5, 0xe9bd79a2, + 0x93d0b0e7, 0x80722890, 0xb4958009, 0xa737187e, 0xff17c604, 0xecb55e73, + 0xd852f6ea, 0xcbf06e9d, 0xb19da7d8, 0xa23f3faf, 0x96d89736, 0x857a0f41, + 0x620305bc, 0x71a19dcb, 0x45463552, 0x56e4ad25, 0x2c896460, 0x3f2bfc17, + 0x0bcc548e, 0x186eccf9, 0xc0d23785, 0xd370aff2, 0xe797076b, 0xf4359f1c, + 0x8e585659, 0x9dface2e, 0xa91d66b7, 0xbabffec0, 0x5dc6f43d, 0x4e646c4a, + 0x7a83c4d3, 0x69215ca4, 0x134c95e1, 0x00ee0d96, 0x3409a50f, 0x27ab3d78, + 0x809c2506, 0x933ebd71, 0xa7d915e8, 0xb47b8d9f, 0xce1644da, 0xddb4dcad, + 0xe9537434, 0xfaf1ec43, 0x1d88e6be, 0x0e2a7ec9, 0x3acdd650, 0x296f4e27, + 0x53028762, 0x40a01f15, 0x7447b78c, 0x67e52ffb, 0xbf59d487, 0xacfb4cf0, + 0x981ce469, 0x8bbe7c1e, 0xf1d3b55b, 0xe2712d2c, 0xd69685b5, 0xc5341dc2, + 0x224d173f, 0x31ef8f48, 0x050827d1, 0x16aabfa6, 0x6cc776e3, 0x7f65ee94, + 0x4b82460d, 0x5820de7a, 0xfbc3faf9, 0xe861628e, 0xdc86ca17, 0xcf245260, + 0xb5499b25, 0xa6eb0352, 0x920cabcb, 0x81ae33bc, 0x66d73941, 0x7575a136, + 0x419209af, 0x523091d8, 0x285d589d, 0x3bffc0ea, 0x0f186873, 0x1cbaf004, + 0xc4060b78, 0xd7a4930f, 0xe3433b96, 0xf0e1a3e1, 0x8a8c6aa4, 0x992ef2d3, + 0xadc95a4a, 0xbe6bc23d, 0x5912c8c0, 0x4ab050b7, 0x7e57f82e, 0x6df56059, + 0x1798a91c, 0x043a316b, 0x30dd99f2, 0x237f0185, 0x844819fb, 0x97ea818c, + 0xa30d2915, 0xb0afb162, 0xcac27827, 0xd960e050, 0xed8748c9, 0xfe25d0be, + 
0x195cda43, 0x0afe4234, 0x3e19eaad, 0x2dbb72da, 0x57d6bb9f, 0x447423e8, + 0x70938b71, 0x63311306, 0xbb8de87a, 0xa82f700d, 0x9cc8d894, 0x8f6a40e3, + 0xf50789a6, 0xe6a511d1, 0xd242b948, 0xc1e0213f, 0x26992bc2, 0x353bb3b5, + 0x01dc1b2c, 0x127e835b, 0x68134a1e, 0x7bb1d269, 0x4f567af0, 0x5cf4e287, + 0x04d43cfd, 0x1776a48a, 0x23910c13, 0x30339464, 0x4a5e5d21, 0x59fcc556, + 0x6d1b6dcf, 0x7eb9f5b8, 0x99c0ff45, 0x8a626732, 0xbe85cfab, 0xad2757dc, + 0xd74a9e99, 0xc4e806ee, 0xf00fae77, 0xe3ad3600, 0x3b11cd7c, 0x28b3550b, + 0x1c54fd92, 0x0ff665e5, 0x759baca0, 0x663934d7, 0x52de9c4e, 0x417c0439, + 0xa6050ec4, 0xb5a796b3, 0x81403e2a, 0x92e2a65d, 0xe88f6f18, 0xfb2df76f, + 0xcfca5ff6, 0xdc68c781, 0x7b5fdfff, 0x68fd4788, 0x5c1aef11, 0x4fb87766, + 0x35d5be23, 0x26772654, 0x12908ecd, 0x013216ba, 0xe64b1c47, 0xf5e98430, + 0xc10e2ca9, 0xd2acb4de, 0xa8c17d9b, 0xbb63e5ec, 0x8f844d75, 0x9c26d502, + 0x449a2e7e, 0x5738b609, 0x63df1e90, 0x707d86e7, 0x0a104fa2, 0x19b2d7d5, + 0x2d557f4c, 0x3ef7e73b, 0xd98eedc6, 0xca2c75b1, 0xfecbdd28, 0xed69455f, + 0x97048c1a, 0x84a6146d, 0xb041bcf4, 0xa3e32483}, + + {0x00000000, 0xa541927e, 0x4f6f520d, 0xea2ec073, 0x9edea41a, 0x3b9f3664, + 0xd1b1f617, 0x74f06469, 0x38513ec5, 0x9d10acbb, 0x773e6cc8, 0xd27ffeb6, + 0xa68f9adf, 0x03ce08a1, 0xe9e0c8d2, 0x4ca15aac, 0x70a27d8a, 0xd5e3eff4, + 0x3fcd2f87, 0x9a8cbdf9, 0xee7cd990, 0x4b3d4bee, 0xa1138b9d, 0x045219e3, + 0x48f3434f, 0xedb2d131, 0x079c1142, 0xa2dd833c, 0xd62de755, 0x736c752b, + 0x9942b558, 0x3c032726, 0xe144fb14, 0x4405696a, 0xae2ba919, 0x0b6a3b67, + 0x7f9a5f0e, 0xdadbcd70, 0x30f50d03, 0x95b49f7d, 0xd915c5d1, 0x7c5457af, + 0x967a97dc, 0x333b05a2, 0x47cb61cb, 0xe28af3b5, 0x08a433c6, 0xade5a1b8, + 0x91e6869e, 0x34a714e0, 0xde89d493, 0x7bc846ed, 0x0f382284, 0xaa79b0fa, + 0x40577089, 0xe516e2f7, 0xa9b7b85b, 0x0cf62a25, 0xe6d8ea56, 0x43997828, + 0x37691c41, 0x92288e3f, 0x78064e4c, 0xdd47dc32, 0xc76580d9, 0x622412a7, + 0x880ad2d4, 0x2d4b40aa, 0x59bb24c3, 0xfcfab6bd, 0x16d476ce, 0xb395e4b0, + 0xff34be1c, 0x5a752c62, 0xb05bec11, 0x151a7e6f, 0x61ea1a06, 0xc4ab8878, + 0x2e85480b, 0x8bc4da75, 0xb7c7fd53, 0x12866f2d, 0xf8a8af5e, 0x5de93d20, + 0x29195949, 0x8c58cb37, 0x66760b44, 0xc337993a, 0x8f96c396, 0x2ad751e8, + 0xc0f9919b, 0x65b803e5, 0x1148678c, 0xb409f5f2, 0x5e273581, 0xfb66a7ff, + 0x26217bcd, 0x8360e9b3, 0x694e29c0, 0xcc0fbbbe, 0xb8ffdfd7, 0x1dbe4da9, + 0xf7908dda, 0x52d11fa4, 0x1e704508, 0xbb31d776, 0x511f1705, 0xf45e857b, + 0x80aee112, 0x25ef736c, 0xcfc1b31f, 0x6a802161, 0x56830647, 0xf3c29439, + 0x19ec544a, 0xbcadc634, 0xc85da25d, 0x6d1c3023, 0x8732f050, 0x2273622e, + 0x6ed23882, 0xcb93aafc, 0x21bd6a8f, 0x84fcf8f1, 0xf00c9c98, 0x554d0ee6, + 0xbf63ce95, 0x1a225ceb, 0x8b277743, 0x2e66e53d, 0xc448254e, 0x6109b730, + 0x15f9d359, 0xb0b84127, 0x5a968154, 0xffd7132a, 0xb3764986, 0x1637dbf8, + 0xfc191b8b, 0x595889f5, 0x2da8ed9c, 0x88e97fe2, 0x62c7bf91, 0xc7862def, + 0xfb850ac9, 0x5ec498b7, 0xb4ea58c4, 0x11abcaba, 0x655baed3, 0xc01a3cad, + 0x2a34fcde, 0x8f756ea0, 0xc3d4340c, 0x6695a672, 0x8cbb6601, 0x29faf47f, + 0x5d0a9016, 0xf84b0268, 0x1265c21b, 0xb7245065, 0x6a638c57, 0xcf221e29, + 0x250cde5a, 0x804d4c24, 0xf4bd284d, 0x51fcba33, 0xbbd27a40, 0x1e93e83e, + 0x5232b292, 0xf77320ec, 0x1d5de09f, 0xb81c72e1, 0xccec1688, 0x69ad84f6, + 0x83834485, 0x26c2d6fb, 0x1ac1f1dd, 0xbf8063a3, 0x55aea3d0, 0xf0ef31ae, + 0x841f55c7, 0x215ec7b9, 0xcb7007ca, 0x6e3195b4, 0x2290cf18, 0x87d15d66, + 0x6dff9d15, 0xc8be0f6b, 0xbc4e6b02, 0x190ff97c, 0xf321390f, 0x5660ab71, + 0x4c42f79a, 0xe90365e4, 0x032da597, 0xa66c37e9, 0xd29c5380, 0x77ddc1fe, + 0x9df3018d, 
0x38b293f3, 0x7413c95f, 0xd1525b21, 0x3b7c9b52, 0x9e3d092c, + 0xeacd6d45, 0x4f8cff3b, 0xa5a23f48, 0x00e3ad36, 0x3ce08a10, 0x99a1186e, + 0x738fd81d, 0xd6ce4a63, 0xa23e2e0a, 0x077fbc74, 0xed517c07, 0x4810ee79, + 0x04b1b4d5, 0xa1f026ab, 0x4bdee6d8, 0xee9f74a6, 0x9a6f10cf, 0x3f2e82b1, + 0xd50042c2, 0x7041d0bc, 0xad060c8e, 0x08479ef0, 0xe2695e83, 0x4728ccfd, + 0x33d8a894, 0x96993aea, 0x7cb7fa99, 0xd9f668e7, 0x9557324b, 0x3016a035, + 0xda386046, 0x7f79f238, 0x0b899651, 0xaec8042f, 0x44e6c45c, 0xe1a75622, + 0xdda47104, 0x78e5e37a, 0x92cb2309, 0x378ab177, 0x437ad51e, 0xe63b4760, + 0x0c158713, 0xa954156d, 0xe5f54fc1, 0x40b4ddbf, 0xaa9a1dcc, 0x0fdb8fb2, + 0x7b2bebdb, 0xde6a79a5, 0x3444b9d6, 0x91052ba8}, + + {0x00000000, 0xdd45aab8, 0xbf672381, 0x62228939, 0x7b2231f3, 0xa6679b4b, + 0xc4451272, 0x1900b8ca, 0xf64463e6, 0x2b01c95e, 0x49234067, 0x9466eadf, + 0x8d665215, 0x5023f8ad, 0x32017194, 0xef44db2c, 0xe964b13d, 0x34211b85, + 0x560392bc, 0x8b463804, 0x924680ce, 0x4f032a76, 0x2d21a34f, 0xf06409f7, + 0x1f20d2db, 0xc2657863, 0xa047f15a, 0x7d025be2, 0x6402e328, 0xb9474990, + 0xdb65c0a9, 0x06206a11, 0xd725148b, 0x0a60be33, 0x6842370a, 0xb5079db2, + 0xac072578, 0x71428fc0, 0x136006f9, 0xce25ac41, 0x2161776d, 0xfc24ddd5, + 0x9e0654ec, 0x4343fe54, 0x5a43469e, 0x8706ec26, 0xe524651f, 0x3861cfa7, + 0x3e41a5b6, 0xe3040f0e, 0x81268637, 0x5c632c8f, 0x45639445, 0x98263efd, + 0xfa04b7c4, 0x27411d7c, 0xc805c650, 0x15406ce8, 0x7762e5d1, 0xaa274f69, + 0xb327f7a3, 0x6e625d1b, 0x0c40d422, 0xd1057e9a, 0xaba65fe7, 0x76e3f55f, + 0x14c17c66, 0xc984d6de, 0xd0846e14, 0x0dc1c4ac, 0x6fe34d95, 0xb2a6e72d, + 0x5de23c01, 0x80a796b9, 0xe2851f80, 0x3fc0b538, 0x26c00df2, 0xfb85a74a, + 0x99a72e73, 0x44e284cb, 0x42c2eeda, 0x9f874462, 0xfda5cd5b, 0x20e067e3, + 0x39e0df29, 0xe4a57591, 0x8687fca8, 0x5bc25610, 0xb4868d3c, 0x69c32784, + 0x0be1aebd, 0xd6a40405, 0xcfa4bccf, 0x12e11677, 0x70c39f4e, 0xad8635f6, + 0x7c834b6c, 0xa1c6e1d4, 0xc3e468ed, 0x1ea1c255, 0x07a17a9f, 0xdae4d027, + 0xb8c6591e, 0x6583f3a6, 0x8ac7288a, 0x57828232, 0x35a00b0b, 0xe8e5a1b3, + 0xf1e51979, 0x2ca0b3c1, 0x4e823af8, 0x93c79040, 0x95e7fa51, 0x48a250e9, + 0x2a80d9d0, 0xf7c57368, 0xeec5cba2, 0x3380611a, 0x51a2e823, 0x8ce7429b, + 0x63a399b7, 0xbee6330f, 0xdcc4ba36, 0x0181108e, 0x1881a844, 0xc5c402fc, + 0xa7e68bc5, 0x7aa3217d, 0x52a0c93f, 0x8fe56387, 0xedc7eabe, 0x30824006, + 0x2982f8cc, 0xf4c75274, 0x96e5db4d, 0x4ba071f5, 0xa4e4aad9, 0x79a10061, + 0x1b838958, 0xc6c623e0, 0xdfc69b2a, 0x02833192, 0x60a1b8ab, 0xbde41213, + 0xbbc47802, 0x6681d2ba, 0x04a35b83, 0xd9e6f13b, 0xc0e649f1, 0x1da3e349, + 0x7f816a70, 0xa2c4c0c8, 0x4d801be4, 0x90c5b15c, 0xf2e73865, 0x2fa292dd, + 0x36a22a17, 0xebe780af, 0x89c50996, 0x5480a32e, 0x8585ddb4, 0x58c0770c, + 0x3ae2fe35, 0xe7a7548d, 0xfea7ec47, 0x23e246ff, 0x41c0cfc6, 0x9c85657e, + 0x73c1be52, 0xae8414ea, 0xcca69dd3, 0x11e3376b, 0x08e38fa1, 0xd5a62519, + 0xb784ac20, 0x6ac10698, 0x6ce16c89, 0xb1a4c631, 0xd3864f08, 0x0ec3e5b0, + 0x17c35d7a, 0xca86f7c2, 0xa8a47efb, 0x75e1d443, 0x9aa50f6f, 0x47e0a5d7, + 0x25c22cee, 0xf8878656, 0xe1873e9c, 0x3cc29424, 0x5ee01d1d, 0x83a5b7a5, + 0xf90696d8, 0x24433c60, 0x4661b559, 0x9b241fe1, 0x8224a72b, 0x5f610d93, + 0x3d4384aa, 0xe0062e12, 0x0f42f53e, 0xd2075f86, 0xb025d6bf, 0x6d607c07, + 0x7460c4cd, 0xa9256e75, 0xcb07e74c, 0x16424df4, 0x106227e5, 0xcd278d5d, + 0xaf050464, 0x7240aedc, 0x6b401616, 0xb605bcae, 0xd4273597, 0x09629f2f, + 0xe6264403, 0x3b63eebb, 0x59416782, 0x8404cd3a, 0x9d0475f0, 0x4041df48, + 0x22635671, 0xff26fcc9, 0x2e238253, 0xf36628eb, 0x9144a1d2, 0x4c010b6a, + 0x5501b3a0, 0x88441918, 
0xea669021, 0x37233a99, 0xd867e1b5, 0x05224b0d, + 0x6700c234, 0xba45688c, 0xa345d046, 0x7e007afe, 0x1c22f3c7, 0xc167597f, + 0xc747336e, 0x1a0299d6, 0x782010ef, 0xa565ba57, 0xbc65029d, 0x6120a825, + 0x0302211c, 0xde478ba4, 0x31035088, 0xec46fa30, 0x8e647309, 0x5321d9b1, + 0x4a21617b, 0x9764cbc3, 0xf54642fa, 0x2803e842}, + + {0x00000000, 0x38116fac, 0x7022df58, 0x4833b0f4, 0xe045beb0, 0xd854d11c, + 0x906761e8, 0xa8760e44, 0xc5670b91, 0xfd76643d, 0xb545d4c9, 0x8d54bb65, + 0x2522b521, 0x1d33da8d, 0x55006a79, 0x6d1105d5, 0x8f2261d3, 0xb7330e7f, + 0xff00be8b, 0xc711d127, 0x6f67df63, 0x5776b0cf, 0x1f45003b, 0x27546f97, + 0x4a456a42, 0x725405ee, 0x3a67b51a, 0x0276dab6, 0xaa00d4f2, 0x9211bb5e, + 0xda220baa, 0xe2336406, 0x1ba8b557, 0x23b9dafb, 0x6b8a6a0f, 0x539b05a3, + 0xfbed0be7, 0xc3fc644b, 0x8bcfd4bf, 0xb3debb13, 0xdecfbec6, 0xe6ded16a, + 0xaeed619e, 0x96fc0e32, 0x3e8a0076, 0x069b6fda, 0x4ea8df2e, 0x76b9b082, + 0x948ad484, 0xac9bbb28, 0xe4a80bdc, 0xdcb96470, 0x74cf6a34, 0x4cde0598, + 0x04edb56c, 0x3cfcdac0, 0x51eddf15, 0x69fcb0b9, 0x21cf004d, 0x19de6fe1, + 0xb1a861a5, 0x89b90e09, 0xc18abefd, 0xf99bd151, 0x37516aae, 0x0f400502, + 0x4773b5f6, 0x7f62da5a, 0xd714d41e, 0xef05bbb2, 0xa7360b46, 0x9f2764ea, + 0xf236613f, 0xca270e93, 0x8214be67, 0xba05d1cb, 0x1273df8f, 0x2a62b023, + 0x625100d7, 0x5a406f7b, 0xb8730b7d, 0x806264d1, 0xc851d425, 0xf040bb89, + 0x5836b5cd, 0x6027da61, 0x28146a95, 0x10050539, 0x7d1400ec, 0x45056f40, + 0x0d36dfb4, 0x3527b018, 0x9d51be5c, 0xa540d1f0, 0xed736104, 0xd5620ea8, + 0x2cf9dff9, 0x14e8b055, 0x5cdb00a1, 0x64ca6f0d, 0xccbc6149, 0xf4ad0ee5, + 0xbc9ebe11, 0x848fd1bd, 0xe99ed468, 0xd18fbbc4, 0x99bc0b30, 0xa1ad649c, + 0x09db6ad8, 0x31ca0574, 0x79f9b580, 0x41e8da2c, 0xa3dbbe2a, 0x9bcad186, + 0xd3f96172, 0xebe80ede, 0x439e009a, 0x7b8f6f36, 0x33bcdfc2, 0x0badb06e, + 0x66bcb5bb, 0x5eadda17, 0x169e6ae3, 0x2e8f054f, 0x86f90b0b, 0xbee864a7, + 0xf6dbd453, 0xcecabbff, 0x6ea2d55c, 0x56b3baf0, 0x1e800a04, 0x269165a8, + 0x8ee76bec, 0xb6f60440, 0xfec5b4b4, 0xc6d4db18, 0xabc5decd, 0x93d4b161, + 0xdbe70195, 0xe3f66e39, 0x4b80607d, 0x73910fd1, 0x3ba2bf25, 0x03b3d089, + 0xe180b48f, 0xd991db23, 0x91a26bd7, 0xa9b3047b, 0x01c50a3f, 0x39d46593, + 0x71e7d567, 0x49f6bacb, 0x24e7bf1e, 0x1cf6d0b2, 0x54c56046, 0x6cd40fea, + 0xc4a201ae, 0xfcb36e02, 0xb480def6, 0x8c91b15a, 0x750a600b, 0x4d1b0fa7, + 0x0528bf53, 0x3d39d0ff, 0x954fdebb, 0xad5eb117, 0xe56d01e3, 0xdd7c6e4f, + 0xb06d6b9a, 0x887c0436, 0xc04fb4c2, 0xf85edb6e, 0x5028d52a, 0x6839ba86, + 0x200a0a72, 0x181b65de, 0xfa2801d8, 0xc2396e74, 0x8a0ade80, 0xb21bb12c, + 0x1a6dbf68, 0x227cd0c4, 0x6a4f6030, 0x525e0f9c, 0x3f4f0a49, 0x075e65e5, + 0x4f6dd511, 0x777cbabd, 0xdf0ab4f9, 0xe71bdb55, 0xaf286ba1, 0x9739040d, + 0x59f3bff2, 0x61e2d05e, 0x29d160aa, 0x11c00f06, 0xb9b60142, 0x81a76eee, + 0xc994de1a, 0xf185b1b6, 0x9c94b463, 0xa485dbcf, 0xecb66b3b, 0xd4a70497, + 0x7cd10ad3, 0x44c0657f, 0x0cf3d58b, 0x34e2ba27, 0xd6d1de21, 0xeec0b18d, + 0xa6f30179, 0x9ee26ed5, 0x36946091, 0x0e850f3d, 0x46b6bfc9, 0x7ea7d065, + 0x13b6d5b0, 0x2ba7ba1c, 0x63940ae8, 0x5b856544, 0xf3f36b00, 0xcbe204ac, + 0x83d1b458, 0xbbc0dbf4, 0x425b0aa5, 0x7a4a6509, 0x3279d5fd, 0x0a68ba51, + 0xa21eb415, 0x9a0fdbb9, 0xd23c6b4d, 0xea2d04e1, 0x873c0134, 0xbf2d6e98, + 0xf71ede6c, 0xcf0fb1c0, 0x6779bf84, 0x5f68d028, 0x175b60dc, 0x2f4a0f70, + 0xcd796b76, 0xf56804da, 0xbd5bb42e, 0x854adb82, 0x2d3cd5c6, 0x152dba6a, + 0x5d1e0a9e, 0x650f6532, 0x081e60e7, 0x300f0f4b, 0x783cbfbf, 0x402dd013, + 0xe85bde57, 0xd04ab1fb, 0x9879010f, 0xa0686ea3}, + + {0x00000000, 0xef306b19, 0xdb8ca0c3, 0x34bccbda, 0xb2f53777, 
0x5dc55c6e, + 0x697997b4, 0x8649fcad, 0x6006181f, 0x8f367306, 0xbb8ab8dc, 0x54bad3c5, + 0xd2f32f68, 0x3dc34471, 0x097f8fab, 0xe64fe4b2, 0xc00c303e, 0x2f3c5b27, + 0x1b8090fd, 0xf4b0fbe4, 0x72f90749, 0x9dc96c50, 0xa975a78a, 0x4645cc93, + 0xa00a2821, 0x4f3a4338, 0x7b8688e2, 0x94b6e3fb, 0x12ff1f56, 0xfdcf744f, + 0xc973bf95, 0x2643d48c, 0x85f4168d, 0x6ac47d94, 0x5e78b64e, 0xb148dd57, + 0x370121fa, 0xd8314ae3, 0xec8d8139, 0x03bdea20, 0xe5f20e92, 0x0ac2658b, + 0x3e7eae51, 0xd14ec548, 0x570739e5, 0xb83752fc, 0x8c8b9926, 0x63bbf23f, + 0x45f826b3, 0xaac84daa, 0x9e748670, 0x7144ed69, 0xf70d11c4, 0x183d7add, + 0x2c81b107, 0xc3b1da1e, 0x25fe3eac, 0xcace55b5, 0xfe729e6f, 0x1142f576, + 0x970b09db, 0x783b62c2, 0x4c87a918, 0xa3b7c201, 0x0e045beb, 0xe13430f2, + 0xd588fb28, 0x3ab89031, 0xbcf16c9c, 0x53c10785, 0x677dcc5f, 0x884da746, + 0x6e0243f4, 0x813228ed, 0xb58ee337, 0x5abe882e, 0xdcf77483, 0x33c71f9a, + 0x077bd440, 0xe84bbf59, 0xce086bd5, 0x213800cc, 0x1584cb16, 0xfab4a00f, + 0x7cfd5ca2, 0x93cd37bb, 0xa771fc61, 0x48419778, 0xae0e73ca, 0x413e18d3, + 0x7582d309, 0x9ab2b810, 0x1cfb44bd, 0xf3cb2fa4, 0xc777e47e, 0x28478f67, + 0x8bf04d66, 0x64c0267f, 0x507ceda5, 0xbf4c86bc, 0x39057a11, 0xd6351108, + 0xe289dad2, 0x0db9b1cb, 0xebf65579, 0x04c63e60, 0x307af5ba, 0xdf4a9ea3, + 0x5903620e, 0xb6330917, 0x828fc2cd, 0x6dbfa9d4, 0x4bfc7d58, 0xa4cc1641, + 0x9070dd9b, 0x7f40b682, 0xf9094a2f, 0x16392136, 0x2285eaec, 0xcdb581f5, + 0x2bfa6547, 0xc4ca0e5e, 0xf076c584, 0x1f46ae9d, 0x990f5230, 0x763f3929, + 0x4283f2f3, 0xadb399ea, 0x1c08b7d6, 0xf338dccf, 0xc7841715, 0x28b47c0c, + 0xaefd80a1, 0x41cdebb8, 0x75712062, 0x9a414b7b, 0x7c0eafc9, 0x933ec4d0, + 0xa7820f0a, 0x48b26413, 0xcefb98be, 0x21cbf3a7, 0x1577387d, 0xfa475364, + 0xdc0487e8, 0x3334ecf1, 0x0788272b, 0xe8b84c32, 0x6ef1b09f, 0x81c1db86, + 0xb57d105c, 0x5a4d7b45, 0xbc029ff7, 0x5332f4ee, 0x678e3f34, 0x88be542d, + 0x0ef7a880, 0xe1c7c399, 0xd57b0843, 0x3a4b635a, 0x99fca15b, 0x76ccca42, + 0x42700198, 0xad406a81, 0x2b09962c, 0xc439fd35, 0xf08536ef, 0x1fb55df6, + 0xf9fab944, 0x16cad25d, 0x22761987, 0xcd46729e, 0x4b0f8e33, 0xa43fe52a, + 0x90832ef0, 0x7fb345e9, 0x59f09165, 0xb6c0fa7c, 0x827c31a6, 0x6d4c5abf, + 0xeb05a612, 0x0435cd0b, 0x308906d1, 0xdfb96dc8, 0x39f6897a, 0xd6c6e263, + 0xe27a29b9, 0x0d4a42a0, 0x8b03be0d, 0x6433d514, 0x508f1ece, 0xbfbf75d7, + 0x120cec3d, 0xfd3c8724, 0xc9804cfe, 0x26b027e7, 0xa0f9db4a, 0x4fc9b053, + 0x7b757b89, 0x94451090, 0x720af422, 0x9d3a9f3b, 0xa98654e1, 0x46b63ff8, + 0xc0ffc355, 0x2fcfa84c, 0x1b736396, 0xf443088f, 0xd200dc03, 0x3d30b71a, + 0x098c7cc0, 0xe6bc17d9, 0x60f5eb74, 0x8fc5806d, 0xbb794bb7, 0x544920ae, + 0xb206c41c, 0x5d36af05, 0x698a64df, 0x86ba0fc6, 0x00f3f36b, 0xefc39872, + 0xdb7f53a8, 0x344f38b1, 0x97f8fab0, 0x78c891a9, 0x4c745a73, 0xa344316a, + 0x250dcdc7, 0xca3da6de, 0xfe816d04, 0x11b1061d, 0xf7fee2af, 0x18ce89b6, + 0x2c72426c, 0xc3422975, 0x450bd5d8, 0xaa3bbec1, 0x9e87751b, 0x71b71e02, + 0x57f4ca8e, 0xb8c4a197, 0x8c786a4d, 0x63480154, 0xe501fdf9, 0x0a3196e0, + 0x3e8d5d3a, 0xd1bd3623, 0x37f2d291, 0xd8c2b988, 0xec7e7252, 0x034e194b, + 0x8507e5e6, 0x6a378eff, 0x5e8b4525, 0xb1bb2e3c}, + + {0x00000000, 0x68032cc8, 0xd0065990, 0xb8057558, 0xa5e0c5d1, 0xcde3e919, + 0x75e69c41, 0x1de5b089, 0x4e2dfd53, 0x262ed19b, 0x9e2ba4c3, 0xf628880b, + 0xebcd3882, 0x83ce144a, 0x3bcb6112, 0x53c84dda, 0x9c5bfaa6, 0xf458d66e, + 0x4c5da336, 0x245e8ffe, 0x39bb3f77, 0x51b813bf, 0xe9bd66e7, 0x81be4a2f, + 0xd27607f5, 0xba752b3d, 0x02705e65, 0x6a7372ad, 0x7796c224, 0x1f95eeec, + 0xa7909bb4, 0xcf93b77c, 0x3d5b83bd, 0x5558af75, 0xed5dda2d, 0x855ef6e5, + 
0x98bb466c, 0xf0b86aa4, 0x48bd1ffc, 0x20be3334, 0x73767eee, 0x1b755226, + 0xa370277e, 0xcb730bb6, 0xd696bb3f, 0xbe9597f7, 0x0690e2af, 0x6e93ce67, + 0xa100791b, 0xc90355d3, 0x7106208b, 0x19050c43, 0x04e0bcca, 0x6ce39002, + 0xd4e6e55a, 0xbce5c992, 0xef2d8448, 0x872ea880, 0x3f2bddd8, 0x5728f110, + 0x4acd4199, 0x22ce6d51, 0x9acb1809, 0xf2c834c1, 0x7ab7077a, 0x12b42bb2, + 0xaab15eea, 0xc2b27222, 0xdf57c2ab, 0xb754ee63, 0x0f519b3b, 0x6752b7f3, + 0x349afa29, 0x5c99d6e1, 0xe49ca3b9, 0x8c9f8f71, 0x917a3ff8, 0xf9791330, + 0x417c6668, 0x297f4aa0, 0xe6ecfddc, 0x8eefd114, 0x36eaa44c, 0x5ee98884, + 0x430c380d, 0x2b0f14c5, 0x930a619d, 0xfb094d55, 0xa8c1008f, 0xc0c22c47, + 0x78c7591f, 0x10c475d7, 0x0d21c55e, 0x6522e996, 0xdd279cce, 0xb524b006, + 0x47ec84c7, 0x2fefa80f, 0x97eadd57, 0xffe9f19f, 0xe20c4116, 0x8a0f6dde, + 0x320a1886, 0x5a09344e, 0x09c17994, 0x61c2555c, 0xd9c72004, 0xb1c40ccc, + 0xac21bc45, 0xc422908d, 0x7c27e5d5, 0x1424c91d, 0xdbb77e61, 0xb3b452a9, + 0x0bb127f1, 0x63b20b39, 0x7e57bbb0, 0x16549778, 0xae51e220, 0xc652cee8, + 0x959a8332, 0xfd99affa, 0x459cdaa2, 0x2d9ff66a, 0x307a46e3, 0x58796a2b, + 0xe07c1f73, 0x887f33bb, 0xf56e0ef4, 0x9d6d223c, 0x25685764, 0x4d6b7bac, + 0x508ecb25, 0x388de7ed, 0x808892b5, 0xe88bbe7d, 0xbb43f3a7, 0xd340df6f, + 0x6b45aa37, 0x034686ff, 0x1ea33676, 0x76a01abe, 0xcea56fe6, 0xa6a6432e, + 0x6935f452, 0x0136d89a, 0xb933adc2, 0xd130810a, 0xccd53183, 0xa4d61d4b, + 0x1cd36813, 0x74d044db, 0x27180901, 0x4f1b25c9, 0xf71e5091, 0x9f1d7c59, + 0x82f8ccd0, 0xeafbe018, 0x52fe9540, 0x3afdb988, 0xc8358d49, 0xa036a181, + 0x1833d4d9, 0x7030f811, 0x6dd54898, 0x05d66450, 0xbdd31108, 0xd5d03dc0, + 0x8618701a, 0xee1b5cd2, 0x561e298a, 0x3e1d0542, 0x23f8b5cb, 0x4bfb9903, + 0xf3feec5b, 0x9bfdc093, 0x546e77ef, 0x3c6d5b27, 0x84682e7f, 0xec6b02b7, + 0xf18eb23e, 0x998d9ef6, 0x2188ebae, 0x498bc766, 0x1a438abc, 0x7240a674, + 0xca45d32c, 0xa246ffe4, 0xbfa34f6d, 0xd7a063a5, 0x6fa516fd, 0x07a63a35, + 0x8fd9098e, 0xe7da2546, 0x5fdf501e, 0x37dc7cd6, 0x2a39cc5f, 0x423ae097, + 0xfa3f95cf, 0x923cb907, 0xc1f4f4dd, 0xa9f7d815, 0x11f2ad4d, 0x79f18185, + 0x6414310c, 0x0c171dc4, 0xb412689c, 0xdc114454, 0x1382f328, 0x7b81dfe0, + 0xc384aab8, 0xab878670, 0xb66236f9, 0xde611a31, 0x66646f69, 0x0e6743a1, + 0x5daf0e7b, 0x35ac22b3, 0x8da957eb, 0xe5aa7b23, 0xf84fcbaa, 0x904ce762, + 0x2849923a, 0x404abef2, 0xb2828a33, 0xda81a6fb, 0x6284d3a3, 0x0a87ff6b, + 0x17624fe2, 0x7f61632a, 0xc7641672, 0xaf673aba, 0xfcaf7760, 0x94ac5ba8, + 0x2ca92ef0, 0x44aa0238, 0x594fb2b1, 0x314c9e79, 0x8949eb21, 0xe14ac7e9, + 0x2ed97095, 0x46da5c5d, 0xfedf2905, 0x96dc05cd, 0x8b39b544, 0xe33a998c, + 0x5b3fecd4, 0x333cc01c, 0x60f48dc6, 0x08f7a10e, 0xb0f2d456, 0xd8f1f89e, + 0xc5144817, 0xad1764df, 0x15121187, 0x7d113d4f}, + + {0x00000000, 0x493c7d27, 0x9278fa4e, 0xdb448769, 0x211d826d, 0x6821ff4a, + 0xb3657823, 0xfa590504, 0x423b04da, 0x0b0779fd, 0xd043fe94, 0x997f83b3, + 0x632686b7, 0x2a1afb90, 0xf15e7cf9, 0xb86201de, 0x847609b4, 0xcd4a7493, + 0x160ef3fa, 0x5f328edd, 0xa56b8bd9, 0xec57f6fe, 0x37137197, 0x7e2f0cb0, + 0xc64d0d6e, 0x8f717049, 0x5435f720, 0x1d098a07, 0xe7508f03, 0xae6cf224, + 0x7528754d, 0x3c14086a, 0x0d006599, 0x443c18be, 0x9f789fd7, 0xd644e2f0, + 0x2c1de7f4, 0x65219ad3, 0xbe651dba, 0xf759609d, 0x4f3b6143, 0x06071c64, + 0xdd439b0d, 0x947fe62a, 0x6e26e32e, 0x271a9e09, 0xfc5e1960, 0xb5626447, + 0x89766c2d, 0xc04a110a, 0x1b0e9663, 0x5232eb44, 0xa86bee40, 0xe1579367, + 0x3a13140e, 0x732f6929, 0xcb4d68f7, 0x827115d0, 0x593592b9, 0x1009ef9e, + 0xea50ea9a, 0xa36c97bd, 0x782810d4, 0x31146df3, 0x1a00cb32, 0x533cb615, + 0x8878317c, 
0xc1444c5b, 0x3b1d495f, 0x72213478, 0xa965b311, 0xe059ce36, + 0x583bcfe8, 0x1107b2cf, 0xca4335a6, 0x837f4881, 0x79264d85, 0x301a30a2, + 0xeb5eb7cb, 0xa262caec, 0x9e76c286, 0xd74abfa1, 0x0c0e38c8, 0x453245ef, + 0xbf6b40eb, 0xf6573dcc, 0x2d13baa5, 0x642fc782, 0xdc4dc65c, 0x9571bb7b, + 0x4e353c12, 0x07094135, 0xfd504431, 0xb46c3916, 0x6f28be7f, 0x2614c358, + 0x1700aeab, 0x5e3cd38c, 0x857854e5, 0xcc4429c2, 0x361d2cc6, 0x7f2151e1, + 0xa465d688, 0xed59abaf, 0x553baa71, 0x1c07d756, 0xc743503f, 0x8e7f2d18, + 0x7426281c, 0x3d1a553b, 0xe65ed252, 0xaf62af75, 0x9376a71f, 0xda4ada38, + 0x010e5d51, 0x48322076, 0xb26b2572, 0xfb575855, 0x2013df3c, 0x692fa21b, + 0xd14da3c5, 0x9871dee2, 0x4335598b, 0x0a0924ac, 0xf05021a8, 0xb96c5c8f, + 0x6228dbe6, 0x2b14a6c1, 0x34019664, 0x7d3deb43, 0xa6796c2a, 0xef45110d, + 0x151c1409, 0x5c20692e, 0x8764ee47, 0xce589360, 0x763a92be, 0x3f06ef99, + 0xe44268f0, 0xad7e15d7, 0x572710d3, 0x1e1b6df4, 0xc55fea9d, 0x8c6397ba, + 0xb0779fd0, 0xf94be2f7, 0x220f659e, 0x6b3318b9, 0x916a1dbd, 0xd856609a, + 0x0312e7f3, 0x4a2e9ad4, 0xf24c9b0a, 0xbb70e62d, 0x60346144, 0x29081c63, + 0xd3511967, 0x9a6d6440, 0x4129e329, 0x08159e0e, 0x3901f3fd, 0x703d8eda, + 0xab7909b3, 0xe2457494, 0x181c7190, 0x51200cb7, 0x8a648bde, 0xc358f6f9, + 0x7b3af727, 0x32068a00, 0xe9420d69, 0xa07e704e, 0x5a27754a, 0x131b086d, + 0xc85f8f04, 0x8163f223, 0xbd77fa49, 0xf44b876e, 0x2f0f0007, 0x66337d20, + 0x9c6a7824, 0xd5560503, 0x0e12826a, 0x472eff4d, 0xff4cfe93, 0xb67083b4, + 0x6d3404dd, 0x240879fa, 0xde517cfe, 0x976d01d9, 0x4c2986b0, 0x0515fb97, + 0x2e015d56, 0x673d2071, 0xbc79a718, 0xf545da3f, 0x0f1cdf3b, 0x4620a21c, + 0x9d642575, 0xd4585852, 0x6c3a598c, 0x250624ab, 0xfe42a3c2, 0xb77edee5, + 0x4d27dbe1, 0x041ba6c6, 0xdf5f21af, 0x96635c88, 0xaa7754e2, 0xe34b29c5, + 0x380faeac, 0x7133d38b, 0x8b6ad68f, 0xc256aba8, 0x19122cc1, 0x502e51e6, + 0xe84c5038, 0xa1702d1f, 0x7a34aa76, 0x3308d751, 0xc951d255, 0x806daf72, + 0x5b29281b, 0x1215553c, 0x230138cf, 0x6a3d45e8, 0xb179c281, 0xf845bfa6, + 0x021cbaa2, 0x4b20c785, 0x906440ec, 0xd9583dcb, 0x613a3c15, 0x28064132, + 0xf342c65b, 0xba7ebb7c, 0x4027be78, 0x091bc35f, 0xd25f4436, 0x9b633911, + 0xa777317b, 0xee4b4c5c, 0x350fcb35, 0x7c33b612, 0x866ab316, 0xcf56ce31, + 0x14124958, 0x5d2e347f, 0xe54c35a1, 0xac704886, 0x7734cfef, 0x3e08b2c8, + 0xc451b7cc, 0x8d6dcaeb, 0x56294d82, 0x1f1530a5}, + + {0x00000000, 0xf43ed648, 0xed91da61, 0x19af0c29, 0xdecfc233, 0x2af1147b, + 0x335e1852, 0xc760ce1a, 0xb873f297, 0x4c4d24df, 0x55e228f6, 0xa1dcfebe, + 0x66bc30a4, 0x9282e6ec, 0x8b2deac5, 0x7f133c8d, 0x750b93df, 0x81354597, + 0x989a49be, 0x6ca49ff6, 0xabc451ec, 0x5ffa87a4, 0x46558b8d, 0xb26b5dc5, + 0xcd786148, 0x3946b700, 0x20e9bb29, 0xd4d76d61, 0x13b7a37b, 0xe7897533, + 0xfe26791a, 0x0a18af52, 0xea1727be, 0x1e29f1f6, 0x0786fddf, 0xf3b82b97, + 0x34d8e58d, 0xc0e633c5, 0xd9493fec, 0x2d77e9a4, 0x5264d529, 0xa65a0361, + 0xbff50f48, 0x4bcbd900, 0x8cab171a, 0x7895c152, 0x613acd7b, 0x95041b33, + 0x9f1cb461, 0x6b226229, 0x728d6e00, 0x86b3b848, 0x41d37652, 0xb5eda01a, + 0xac42ac33, 0x587c7a7b, 0x276f46f6, 0xd35190be, 0xcafe9c97, 0x3ec04adf, + 0xf9a084c5, 0x0d9e528d, 0x14315ea4, 0xe00f88ec, 0xd1c2398d, 0x25fcefc5, + 0x3c53e3ec, 0xc86d35a4, 0x0f0dfbbe, 0xfb332df6, 0xe29c21df, 0x16a2f797, + 0x69b1cb1a, 0x9d8f1d52, 0x8420117b, 0x701ec733, 0xb77e0929, 0x4340df61, + 0x5aefd348, 0xaed10500, 0xa4c9aa52, 0x50f77c1a, 0x49587033, 0xbd66a67b, + 0x7a066861, 0x8e38be29, 0x9797b200, 0x63a96448, 0x1cba58c5, 0xe8848e8d, + 0xf12b82a4, 0x051554ec, 0xc2759af6, 0x364b4cbe, 0x2fe44097, 0xdbda96df, + 0x3bd51e33, 0xcfebc87b, 
0xd644c452, 0x227a121a, 0xe51adc00, 0x11240a48, + 0x088b0661, 0xfcb5d029, 0x83a6eca4, 0x77983aec, 0x6e3736c5, 0x9a09e08d, + 0x5d692e97, 0xa957f8df, 0xb0f8f4f6, 0x44c622be, 0x4ede8dec, 0xbae05ba4, + 0xa34f578d, 0x577181c5, 0x90114fdf, 0x642f9997, 0x7d8095be, 0x89be43f6, + 0xf6ad7f7b, 0x0293a933, 0x1b3ca51a, 0xef027352, 0x2862bd48, 0xdc5c6b00, + 0xc5f36729, 0x31cdb161, 0xa66805eb, 0x5256d3a3, 0x4bf9df8a, 0xbfc709c2, + 0x78a7c7d8, 0x8c991190, 0x95361db9, 0x6108cbf1, 0x1e1bf77c, 0xea252134, + 0xf38a2d1d, 0x07b4fb55, 0xc0d4354f, 0x34eae307, 0x2d45ef2e, 0xd97b3966, + 0xd3639634, 0x275d407c, 0x3ef24c55, 0xcacc9a1d, 0x0dac5407, 0xf992824f, + 0xe03d8e66, 0x1403582e, 0x6b1064a3, 0x9f2eb2eb, 0x8681bec2, 0x72bf688a, + 0xb5dfa690, 0x41e170d8, 0x584e7cf1, 0xac70aab9, 0x4c7f2255, 0xb841f41d, + 0xa1eef834, 0x55d02e7c, 0x92b0e066, 0x668e362e, 0x7f213a07, 0x8b1fec4f, + 0xf40cd0c2, 0x0032068a, 0x199d0aa3, 0xeda3dceb, 0x2ac312f1, 0xdefdc4b9, + 0xc752c890, 0x336c1ed8, 0x3974b18a, 0xcd4a67c2, 0xd4e56beb, 0x20dbbda3, + 0xe7bb73b9, 0x1385a5f1, 0x0a2aa9d8, 0xfe147f90, 0x8107431d, 0x75399555, + 0x6c96997c, 0x98a84f34, 0x5fc8812e, 0xabf65766, 0xb2595b4f, 0x46678d07, + 0x77aa3c66, 0x8394ea2e, 0x9a3be607, 0x6e05304f, 0xa965fe55, 0x5d5b281d, + 0x44f42434, 0xb0caf27c, 0xcfd9cef1, 0x3be718b9, 0x22481490, 0xd676c2d8, + 0x11160cc2, 0xe528da8a, 0xfc87d6a3, 0x08b900eb, 0x02a1afb9, 0xf69f79f1, + 0xef3075d8, 0x1b0ea390, 0xdc6e6d8a, 0x2850bbc2, 0x31ffb7eb, 0xc5c161a3, + 0xbad25d2e, 0x4eec8b66, 0x5743874f, 0xa37d5107, 0x641d9f1d, 0x90234955, + 0x898c457c, 0x7db29334, 0x9dbd1bd8, 0x6983cd90, 0x702cc1b9, 0x841217f1, + 0x4372d9eb, 0xb74c0fa3, 0xaee3038a, 0x5addd5c2, 0x25cee94f, 0xd1f03f07, + 0xc85f332e, 0x3c61e566, 0xfb012b7c, 0x0f3ffd34, 0x1690f11d, 0xe2ae2755, + 0xe8b68807, 0x1c885e4f, 0x05275266, 0xf119842e, 0x36794a34, 0xc2479c7c, + 0xdbe89055, 0x2fd6461d, 0x50c57a90, 0xa4fbacd8, 0xbd54a0f1, 0x496a76b9, + 0x8e0ab8a3, 0x7a346eeb, 0x639b62c2, 0x97a5b48a}, + + {0x00000000, 0xcb567ba5, 0x934081bb, 0x5816fa1e, 0x236d7587, 0xe83b0e22, + 0xb02df43c, 0x7b7b8f99, 0x46daeb0e, 0x8d8c90ab, 0xd59a6ab5, 0x1ecc1110, + 0x65b79e89, 0xaee1e52c, 0xf6f71f32, 0x3da16497, 0x8db5d61c, 0x46e3adb9, + 0x1ef557a7, 0xd5a32c02, 0xaed8a39b, 0x658ed83e, 0x3d982220, 0xf6ce5985, + 0xcb6f3d12, 0x003946b7, 0x582fbca9, 0x9379c70c, 0xe8024895, 0x23543330, + 0x7b42c92e, 0xb014b28b, 0x1e87dac9, 0xd5d1a16c, 0x8dc75b72, 0x469120d7, + 0x3deaaf4e, 0xf6bcd4eb, 0xaeaa2ef5, 0x65fc5550, 0x585d31c7, 0x930b4a62, + 0xcb1db07c, 0x004bcbd9, 0x7b304440, 0xb0663fe5, 0xe870c5fb, 0x2326be5e, + 0x93320cd5, 0x58647770, 0x00728d6e, 0xcb24f6cb, 0xb05f7952, 0x7b0902f7, + 0x231ff8e9, 0xe849834c, 0xd5e8e7db, 0x1ebe9c7e, 0x46a86660, 0x8dfe1dc5, + 0xf685925c, 0x3dd3e9f9, 0x65c513e7, 0xae936842, 0x3d0fb592, 0xf659ce37, + 0xae4f3429, 0x65194f8c, 0x1e62c015, 0xd534bbb0, 0x8d2241ae, 0x46743a0b, + 0x7bd55e9c, 0xb0832539, 0xe895df27, 0x23c3a482, 0x58b82b1b, 0x93ee50be, + 0xcbf8aaa0, 0x00aed105, 0xb0ba638e, 0x7bec182b, 0x23fae235, 0xe8ac9990, + 0x93d71609, 0x58816dac, 0x009797b2, 0xcbc1ec17, 0xf6608880, 0x3d36f325, + 0x6520093b, 0xae76729e, 0xd50dfd07, 0x1e5b86a2, 0x464d7cbc, 0x8d1b0719, + 0x23886f5b, 0xe8de14fe, 0xb0c8eee0, 0x7b9e9545, 0x00e51adc, 0xcbb36179, + 0x93a59b67, 0x58f3e0c2, 0x65528455, 0xae04fff0, 0xf61205ee, 0x3d447e4b, + 0x463ff1d2, 0x8d698a77, 0xd57f7069, 0x1e290bcc, 0xae3db947, 0x656bc2e2, + 0x3d7d38fc, 0xf62b4359, 0x8d50ccc0, 0x4606b765, 0x1e104d7b, 0xd54636de, + 0xe8e75249, 0x23b129ec, 0x7ba7d3f2, 0xb0f1a857, 0xcb8a27ce, 0x00dc5c6b, + 0x58caa675, 0x939cddd0, 0x7a1f6b24, 
0xb1491081, 0xe95fea9f, 0x2209913a, + 0x59721ea3, 0x92246506, 0xca329f18, 0x0164e4bd, 0x3cc5802a, 0xf793fb8f, + 0xaf850191, 0x64d37a34, 0x1fa8f5ad, 0xd4fe8e08, 0x8ce87416, 0x47be0fb3, + 0xf7aabd38, 0x3cfcc69d, 0x64ea3c83, 0xafbc4726, 0xd4c7c8bf, 0x1f91b31a, + 0x47874904, 0x8cd132a1, 0xb1705636, 0x7a262d93, 0x2230d78d, 0xe966ac28, + 0x921d23b1, 0x594b5814, 0x015da20a, 0xca0bd9af, 0x6498b1ed, 0xafceca48, + 0xf7d83056, 0x3c8e4bf3, 0x47f5c46a, 0x8ca3bfcf, 0xd4b545d1, 0x1fe33e74, + 0x22425ae3, 0xe9142146, 0xb102db58, 0x7a54a0fd, 0x012f2f64, 0xca7954c1, + 0x926faedf, 0x5939d57a, 0xe92d67f1, 0x227b1c54, 0x7a6de64a, 0xb13b9def, + 0xca401276, 0x011669d3, 0x590093cd, 0x9256e868, 0xaff78cff, 0x64a1f75a, + 0x3cb70d44, 0xf7e176e1, 0x8c9af978, 0x47cc82dd, 0x1fda78c3, 0xd48c0366, + 0x4710deb6, 0x8c46a513, 0xd4505f0d, 0x1f0624a8, 0x647dab31, 0xaf2bd094, + 0xf73d2a8a, 0x3c6b512f, 0x01ca35b8, 0xca9c4e1d, 0x928ab403, 0x59dccfa6, + 0x22a7403f, 0xe9f13b9a, 0xb1e7c184, 0x7ab1ba21, 0xcaa508aa, 0x01f3730f, + 0x59e58911, 0x92b3f2b4, 0xe9c87d2d, 0x229e0688, 0x7a88fc96, 0xb1de8733, + 0x8c7fe3a4, 0x47299801, 0x1f3f621f, 0xd46919ba, 0xaf129623, 0x6444ed86, + 0x3c521798, 0xf7046c3d, 0x5997047f, 0x92c17fda, 0xcad785c4, 0x0181fe61, + 0x7afa71f8, 0xb1ac0a5d, 0xe9baf043, 0x22ec8be6, 0x1f4def71, 0xd41b94d4, + 0x8c0d6eca, 0x475b156f, 0x3c209af6, 0xf776e153, 0xaf601b4d, 0x643660e8, + 0xd422d263, 0x1f74a9c6, 0x476253d8, 0x8c34287d, 0xf74fa7e4, 0x3c19dc41, + 0x640f265f, 0xaf595dfa, 0x92f8396d, 0x59ae42c8, 0x01b8b8d6, 0xcaeec373, + 0xb1954cea, 0x7ac3374f, 0x22d5cd51, 0xe983b6f4}, + + {0x00000000, 0x9771f7c1, 0x2b0f9973, 0xbc7e6eb2, 0x561f32e6, 0xc16ec527, + 0x7d10ab95, 0xea615c54, 0xac3e65cc, 0x3b4f920d, 0x8731fcbf, 0x10400b7e, + 0xfa21572a, 0x6d50a0eb, 0xd12ece59, 0x465f3998, 0x5d90bd69, 0xcae14aa8, + 0x769f241a, 0xe1eed3db, 0x0b8f8f8f, 0x9cfe784e, 0x208016fc, 0xb7f1e13d, + 0xf1aed8a5, 0x66df2f64, 0xdaa141d6, 0x4dd0b617, 0xa7b1ea43, 0x30c01d82, + 0x8cbe7330, 0x1bcf84f1, 0xbb217ad2, 0x2c508d13, 0x902ee3a1, 0x075f1460, + 0xed3e4834, 0x7a4fbff5, 0xc631d147, 0x51402686, 0x171f1f1e, 0x806ee8df, + 0x3c10866d, 0xab6171ac, 0x41002df8, 0xd671da39, 0x6a0fb48b, 0xfd7e434a, + 0xe6b1c7bb, 0x71c0307a, 0xcdbe5ec8, 0x5acfa909, 0xb0aef55d, 0x27df029c, + 0x9ba16c2e, 0x0cd09bef, 0x4a8fa277, 0xddfe55b6, 0x61803b04, 0xf6f1ccc5, + 0x1c909091, 0x8be16750, 0x379f09e2, 0xa0eefe23, 0x73ae8355, 0xe4df7494, + 0x58a11a26, 0xcfd0ede7, 0x25b1b1b3, 0xb2c04672, 0x0ebe28c0, 0x99cfdf01, + 0xdf90e699, 0x48e11158, 0xf49f7fea, 0x63ee882b, 0x898fd47f, 0x1efe23be, + 0xa2804d0c, 0x35f1bacd, 0x2e3e3e3c, 0xb94fc9fd, 0x0531a74f, 0x9240508e, + 0x78210cda, 0xef50fb1b, 0x532e95a9, 0xc45f6268, 0x82005bf0, 0x1571ac31, + 0xa90fc283, 0x3e7e3542, 0xd41f6916, 0x436e9ed7, 0xff10f065, 0x686107a4, + 0xc88ff987, 0x5ffe0e46, 0xe38060f4, 0x74f19735, 0x9e90cb61, 0x09e13ca0, + 0xb59f5212, 0x22eea5d3, 0x64b19c4b, 0xf3c06b8a, 0x4fbe0538, 0xd8cff2f9, + 0x32aeaead, 0xa5df596c, 0x19a137de, 0x8ed0c01f, 0x951f44ee, 0x026eb32f, + 0xbe10dd9d, 0x29612a5c, 0xc3007608, 0x547181c9, 0xe80fef7b, 0x7f7e18ba, + 0x39212122, 0xae50d6e3, 0x122eb851, 0x855f4f90, 0x6f3e13c4, 0xf84fe405, + 0x44318ab7, 0xd3407d76, 0xe75d06aa, 0x702cf16b, 0xcc529fd9, 0x5b236818, + 0xb142344c, 0x2633c38d, 0x9a4dad3f, 0x0d3c5afe, 0x4b636366, 0xdc1294a7, + 0x606cfa15, 0xf71d0dd4, 0x1d7c5180, 0x8a0da641, 0x3673c8f3, 0xa1023f32, + 0xbacdbbc3, 0x2dbc4c02, 0x91c222b0, 0x06b3d571, 0xecd28925, 0x7ba37ee4, + 0xc7dd1056, 0x50ace797, 0x16f3de0f, 0x818229ce, 0x3dfc477c, 0xaa8db0bd, + 0x40ecece9, 0xd79d1b28, 0x6be3759a, 0xfc92825b, 
0x5c7c7c78, 0xcb0d8bb9, + 0x7773e50b, 0xe00212ca, 0x0a634e9e, 0x9d12b95f, 0x216cd7ed, 0xb61d202c, + 0xf04219b4, 0x6733ee75, 0xdb4d80c7, 0x4c3c7706, 0xa65d2b52, 0x312cdc93, + 0x8d52b221, 0x1a2345e0, 0x01ecc111, 0x969d36d0, 0x2ae35862, 0xbd92afa3, + 0x57f3f3f7, 0xc0820436, 0x7cfc6a84, 0xeb8d9d45, 0xadd2a4dd, 0x3aa3531c, + 0x86dd3dae, 0x11acca6f, 0xfbcd963b, 0x6cbc61fa, 0xd0c20f48, 0x47b3f889, + 0x94f385ff, 0x0382723e, 0xbffc1c8c, 0x288deb4d, 0xc2ecb719, 0x559d40d8, + 0xe9e32e6a, 0x7e92d9ab, 0x38cde033, 0xafbc17f2, 0x13c27940, 0x84b38e81, + 0x6ed2d2d5, 0xf9a32514, 0x45dd4ba6, 0xd2acbc67, 0xc9633896, 0x5e12cf57, + 0xe26ca1e5, 0x751d5624, 0x9f7c0a70, 0x080dfdb1, 0xb4739303, 0x230264c2, + 0x655d5d5a, 0xf22caa9b, 0x4e52c429, 0xd92333e8, 0x33426fbc, 0xa433987d, + 0x184df6cf, 0x8f3c010e, 0x2fd2ff2d, 0xb8a308ec, 0x04dd665e, 0x93ac919f, + 0x79cdcdcb, 0xeebc3a0a, 0x52c254b8, 0xc5b3a379, 0x83ec9ae1, 0x149d6d20, + 0xa8e30392, 0x3f92f453, 0xd5f3a807, 0x42825fc6, 0xfefc3174, 0x698dc6b5, + 0x72424244, 0xe533b585, 0x594ddb37, 0xce3c2cf6, 0x245d70a2, 0xb32c8763, + 0x0f52e9d1, 0x98231e10, 0xde7c2788, 0x490dd049, 0xf573befb, 0x6202493a, + 0x8863156e, 0x1f12e2af, 0xa36c8c1d, 0x341d7bdc}, + + {0x00000000, 0x3171d430, 0x62e3a860, 0x53927c50, 0xc5c750c0, 0xf4b684f0, + 0xa724f8a0, 0x96552c90, 0x8e62d771, 0xbf130341, 0xec817f11, 0xddf0ab21, + 0x4ba587b1, 0x7ad45381, 0x29462fd1, 0x1837fbe1, 0x1929d813, 0x28580c23, + 0x7bca7073, 0x4abba443, 0xdcee88d3, 0xed9f5ce3, 0xbe0d20b3, 0x8f7cf483, + 0x974b0f62, 0xa63adb52, 0xf5a8a702, 0xc4d97332, 0x528c5fa2, 0x63fd8b92, + 0x306ff7c2, 0x011e23f2, 0x3253b026, 0x03226416, 0x50b01846, 0x61c1cc76, + 0xf794e0e6, 0xc6e534d6, 0x95774886, 0xa4069cb6, 0xbc316757, 0x8d40b367, + 0xded2cf37, 0xefa31b07, 0x79f63797, 0x4887e3a7, 0x1b159ff7, 0x2a644bc7, + 0x2b7a6835, 0x1a0bbc05, 0x4999c055, 0x78e81465, 0xeebd38f5, 0xdfccecc5, + 0x8c5e9095, 0xbd2f44a5, 0xa518bf44, 0x94696b74, 0xc7fb1724, 0xf68ac314, + 0x60dfef84, 0x51ae3bb4, 0x023c47e4, 0x334d93d4, 0x64a7604c, 0x55d6b47c, + 0x0644c82c, 0x37351c1c, 0xa160308c, 0x9011e4bc, 0xc38398ec, 0xf2f24cdc, + 0xeac5b73d, 0xdbb4630d, 0x88261f5d, 0xb957cb6d, 0x2f02e7fd, 0x1e7333cd, + 0x4de14f9d, 0x7c909bad, 0x7d8eb85f, 0x4cff6c6f, 0x1f6d103f, 0x2e1cc40f, + 0xb849e89f, 0x89383caf, 0xdaaa40ff, 0xebdb94cf, 0xf3ec6f2e, 0xc29dbb1e, + 0x910fc74e, 0xa07e137e, 0x362b3fee, 0x075aebde, 0x54c8978e, 0x65b943be, + 0x56f4d06a, 0x6785045a, 0x3417780a, 0x0566ac3a, 0x933380aa, 0xa242549a, + 0xf1d028ca, 0xc0a1fcfa, 0xd896071b, 0xe9e7d32b, 0xba75af7b, 0x8b047b4b, + 0x1d5157db, 0x2c2083eb, 0x7fb2ffbb, 0x4ec32b8b, 0x4fdd0879, 0x7eacdc49, + 0x2d3ea019, 0x1c4f7429, 0x8a1a58b9, 0xbb6b8c89, 0xe8f9f0d9, 0xd98824e9, + 0xc1bfdf08, 0xf0ce0b38, 0xa35c7768, 0x922da358, 0x04788fc8, 0x35095bf8, + 0x669b27a8, 0x57eaf398, 0xc94ec098, 0xf83f14a8, 0xabad68f8, 0x9adcbcc8, + 0x0c899058, 0x3df84468, 0x6e6a3838, 0x5f1bec08, 0x472c17e9, 0x765dc3d9, + 0x25cfbf89, 0x14be6bb9, 0x82eb4729, 0xb39a9319, 0xe008ef49, 0xd1793b79, + 0xd067188b, 0xe116ccbb, 0xb284b0eb, 0x83f564db, 0x15a0484b, 0x24d19c7b, + 0x7743e02b, 0x4632341b, 0x5e05cffa, 0x6f741bca, 0x3ce6679a, 0x0d97b3aa, + 0x9bc29f3a, 0xaab34b0a, 0xf921375a, 0xc850e36a, 0xfb1d70be, 0xca6ca48e, + 0x99fed8de, 0xa88f0cee, 0x3eda207e, 0x0fabf44e, 0x5c39881e, 0x6d485c2e, + 0x757fa7cf, 0x440e73ff, 0x179c0faf, 0x26eddb9f, 0xb0b8f70f, 0x81c9233f, + 0xd25b5f6f, 0xe32a8b5f, 0xe234a8ad, 0xd3457c9d, 0x80d700cd, 0xb1a6d4fd, + 0x27f3f86d, 0x16822c5d, 0x4510500d, 0x7461843d, 0x6c567fdc, 0x5d27abec, + 0x0eb5d7bc, 0x3fc4038c, 0xa9912f1c, 0x98e0fb2c, 0xcb72877c, 
0xfa03534c, + 0xade9a0d4, 0x9c9874e4, 0xcf0a08b4, 0xfe7bdc84, 0x682ef014, 0x595f2424, + 0x0acd5874, 0x3bbc8c44, 0x238b77a5, 0x12faa395, 0x4168dfc5, 0x70190bf5, + 0xe64c2765, 0xd73df355, 0x84af8f05, 0xb5de5b35, 0xb4c078c7, 0x85b1acf7, + 0xd623d0a7, 0xe7520497, 0x71072807, 0x4076fc37, 0x13e48067, 0x22955457, + 0x3aa2afb6, 0x0bd37b86, 0x584107d6, 0x6930d3e6, 0xff65ff76, 0xce142b46, + 0x9d865716, 0xacf78326, 0x9fba10f2, 0xaecbc4c2, 0xfd59b892, 0xcc286ca2, + 0x5a7d4032, 0x6b0c9402, 0x389ee852, 0x09ef3c62, 0x11d8c783, 0x20a913b3, + 0x733b6fe3, 0x424abbd3, 0xd41f9743, 0xe56e4373, 0xb6fc3f23, 0x878deb13, + 0x8693c8e1, 0xb7e21cd1, 0xe4706081, 0xd501b4b1, 0x43549821, 0x72254c11, + 0x21b73041, 0x10c6e471, 0x08f11f90, 0x3980cba0, 0x6a12b7f0, 0x5b6363c0, + 0xcd364f50, 0xfc479b60, 0xafd5e730, 0x9ea43300}, + + {0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1, + 0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76, + 0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526, + 0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478, + 0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b, + 0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229, + 0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a, + 0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664, + 0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34, + 0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3, + 0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69, + 0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37, + 0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924, + 0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0, + 0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3, + 0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad, + 0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b, + 0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc, + 0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac, + 0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2, + 0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1, + 0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7, + 0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4, + 0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa, + 0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa, + 0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d, + 0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb, + 0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5, + 0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6, + 0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572, + 0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061, + 0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f, + 0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5, + 0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262, + 0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32, + 0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c, + 0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f, + 
0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d, + 0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e, + 0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970, + 0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120, + 0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7, + 0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433}, + + {0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af, + 0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818, + 0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13, + 0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576, + 0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828, + 0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60, + 0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e, + 0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b, + 0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50, + 0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7, + 0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3, + 0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86, + 0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8, + 0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a, + 0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864, + 0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101, + 0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0, + 0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917, + 0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c, + 0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479, + 0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927, + 0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880, + 0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de, + 0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb, + 0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0, + 0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607, + 0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6, + 0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3, + 0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d, + 0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f, + 0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21, + 0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744, + 0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240, + 0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7, + 0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc, + 0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199, + 0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7, + 0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f, + 0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1, + 0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4, + 0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf, + 0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708, + 0xdfbadb2b, 
0x8bbd8e6d, 0x77b471a7, 0x23b324e1}, + + {0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4, + 0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418, + 0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37, + 0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0, + 0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9, + 0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f, + 0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276, + 0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81, + 0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae, + 0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42, + 0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328, + 0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf, + 0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6, + 0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c, + 0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605, + 0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2, + 0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1, + 0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d, + 0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972, + 0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185, + 0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c, + 0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0, + 0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9, + 0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e, + 0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361, + 0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d, + 0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce, + 0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339, + 0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20, + 0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa, + 0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3, + 0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614, + 0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e, + 0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092, + 0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd, + 0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a, + 0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53, + 0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5, + 0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc, + 0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b, + 0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124, + 0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8, + 0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d}, + + {0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115, + 0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4, + 0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541, + 0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7, + 0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 
0x59c7ab73, 0xabcba68d, + 0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d, + 0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7, + 0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241, + 0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4, + 0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615, + 0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02, + 0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4, + 0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce, + 0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0, + 0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a, + 0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c, + 0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297, + 0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56, + 0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3, + 0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725, + 0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f, + 0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b, + 0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721, + 0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7, + 0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52, + 0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293, + 0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978, + 0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e, + 0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4, + 0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca, + 0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0, + 0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06, + 0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611, + 0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0, + 0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245, + 0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3, + 0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189, + 0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689, + 0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3, + 0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545, + 0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0, + 0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111, + 0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa} + +}; + +static uint32_t long_shifts[4][256] = { + {0x00000000, 0xe040e0ac, 0xc56db7a9, 0x252d5705, 0x8f3719a3, 0x6f77f90f, + 0x4a5aae0a, 0xaa1a4ea6, 0x1b8245b7, 0xfbc2a51b, 0xdeeff21e, 0x3eaf12b2, + 0x94b55c14, 0x74f5bcb8, 0x51d8ebbd, 0xb1980b11, 0x37048b6e, 0xd7446bc2, + 0xf2693cc7, 0x1229dc6b, 0xb83392cd, 0x58737261, 0x7d5e2564, 0x9d1ec5c8, + 0x2c86ced9, 0xccc62e75, 0xe9eb7970, 0x09ab99dc, 0xa3b1d77a, 0x43f137d6, + 0x66dc60d3, 0x869c807f, 0x6e0916dc, 0x8e49f670, 0xab64a175, 0x4b2441d9, + 0xe13e0f7f, 0x017eefd3, 0x2453b8d6, 0xc413587a, 0x758b536b, 0x95cbb3c7, + 0xb0e6e4c2, 0x50a6046e, 0xfabc4ac8, 0x1afcaa64, 0x3fd1fd61, 0xdf911dcd, + 0x590d9db2, 0xb94d7d1e, 0x9c602a1b, 0x7c20cab7, 0xd63a8411, 0x367a64bd, + 0x135733b8, 0xf317d314, 
0x428fd805, 0xa2cf38a9, 0x87e26fac, 0x67a28f00, + 0xcdb8c1a6, 0x2df8210a, 0x08d5760f, 0xe89596a3, 0xdc122db8, 0x3c52cd14, + 0x197f9a11, 0xf93f7abd, 0x5325341b, 0xb365d4b7, 0x964883b2, 0x7608631e, + 0xc790680f, 0x27d088a3, 0x02fddfa6, 0xe2bd3f0a, 0x48a771ac, 0xa8e79100, + 0x8dcac605, 0x6d8a26a9, 0xeb16a6d6, 0x0b56467a, 0x2e7b117f, 0xce3bf1d3, + 0x6421bf75, 0x84615fd9, 0xa14c08dc, 0x410ce870, 0xf094e361, 0x10d403cd, + 0x35f954c8, 0xd5b9b464, 0x7fa3fac2, 0x9fe31a6e, 0xbace4d6b, 0x5a8eadc7, + 0xb21b3b64, 0x525bdbc8, 0x77768ccd, 0x97366c61, 0x3d2c22c7, 0xdd6cc26b, + 0xf841956e, 0x180175c2, 0xa9997ed3, 0x49d99e7f, 0x6cf4c97a, 0x8cb429d6, + 0x26ae6770, 0xc6ee87dc, 0xe3c3d0d9, 0x03833075, 0x851fb00a, 0x655f50a6, + 0x407207a3, 0xa032e70f, 0x0a28a9a9, 0xea684905, 0xcf451e00, 0x2f05feac, + 0x9e9df5bd, 0x7edd1511, 0x5bf04214, 0xbbb0a2b8, 0x11aaec1e, 0xf1ea0cb2, + 0xd4c75bb7, 0x3487bb1b, 0xbdc82d81, 0x5d88cd2d, 0x78a59a28, 0x98e57a84, + 0x32ff3422, 0xd2bfd48e, 0xf792838b, 0x17d26327, 0xa64a6836, 0x460a889a, + 0x6327df9f, 0x83673f33, 0x297d7195, 0xc93d9139, 0xec10c63c, 0x0c502690, + 0x8acca6ef, 0x6a8c4643, 0x4fa11146, 0xafe1f1ea, 0x05fbbf4c, 0xe5bb5fe0, + 0xc09608e5, 0x20d6e849, 0x914ee358, 0x710e03f4, 0x542354f1, 0xb463b45d, + 0x1e79fafb, 0xfe391a57, 0xdb144d52, 0x3b54adfe, 0xd3c13b5d, 0x3381dbf1, + 0x16ac8cf4, 0xf6ec6c58, 0x5cf622fe, 0xbcb6c252, 0x999b9557, 0x79db75fb, + 0xc8437eea, 0x28039e46, 0x0d2ec943, 0xed6e29ef, 0x47746749, 0xa73487e5, + 0x8219d0e0, 0x6259304c, 0xe4c5b033, 0x0485509f, 0x21a8079a, 0xc1e8e736, + 0x6bf2a990, 0x8bb2493c, 0xae9f1e39, 0x4edffe95, 0xff47f584, 0x1f071528, + 0x3a2a422d, 0xda6aa281, 0x7070ec27, 0x90300c8b, 0xb51d5b8e, 0x555dbb22, + 0x61da0039, 0x819ae095, 0xa4b7b790, 0x44f7573c, 0xeeed199a, 0x0eadf936, + 0x2b80ae33, 0xcbc04e9f, 0x7a58458e, 0x9a18a522, 0xbf35f227, 0x5f75128b, + 0xf56f5c2d, 0x152fbc81, 0x3002eb84, 0xd0420b28, 0x56de8b57, 0xb69e6bfb, + 0x93b33cfe, 0x73f3dc52, 0xd9e992f4, 0x39a97258, 0x1c84255d, 0xfcc4c5f1, + 0x4d5ccee0, 0xad1c2e4c, 0x88317949, 0x687199e5, 0xc26bd743, 0x222b37ef, + 0x070660ea, 0xe7468046, 0x0fd316e5, 0xef93f649, 0xcabea14c, 0x2afe41e0, + 0x80e40f46, 0x60a4efea, 0x4589b8ef, 0xa5c95843, 0x14515352, 0xf411b3fe, + 0xd13ce4fb, 0x317c0457, 0x9b664af1, 0x7b26aa5d, 0x5e0bfd58, 0xbe4b1df4, + 0x38d79d8b, 0xd8977d27, 0xfdba2a22, 0x1dfaca8e, 0xb7e08428, 0x57a06484, + 0x728d3381, 0x92cdd32d, 0x2355d83c, 0xc3153890, 0xe6386f95, 0x06788f39, + 0xac62c19f, 0x4c222133, 0x690f7636, 0x894f969a}, + + {0x00000000, 0x7e7c2df3, 0xfcf85be6, 0x82847615, 0xfc1cc13d, 0x8260ecce, + 0x00e49adb, 0x7e98b728, 0xfdd5f48b, 0x83a9d978, 0x012daf6d, 0x7f51829e, + 0x01c935b6, 0x7fb51845, 0xfd316e50, 0x834d43a3, 0xfe479fe7, 0x803bb214, + 0x02bfc401, 0x7cc3e9f2, 0x025b5eda, 0x7c277329, 0xfea3053c, 0x80df28cf, + 0x03926b6c, 0x7dee469f, 0xff6a308a, 0x81161d79, 0xff8eaa51, 0x81f287a2, + 0x0376f1b7, 0x7d0adc44, 0xf963493f, 0x871f64cc, 0x059b12d9, 0x7be73f2a, + 0x057f8802, 0x7b03a5f1, 0xf987d3e4, 0x87fbfe17, 0x04b6bdb4, 0x7aca9047, + 0xf84ee652, 0x8632cba1, 0xf8aa7c89, 0x86d6517a, 0x0452276f, 0x7a2e0a9c, + 0x0724d6d8, 0x7958fb2b, 0xfbdc8d3e, 0x85a0a0cd, 0xfb3817e5, 0x85443a16, + 0x07c04c03, 0x79bc61f0, 0xfaf12253, 0x848d0fa0, 0x060979b5, 0x78755446, + 0x06ede36e, 0x7891ce9d, 0xfa15b888, 0x8469957b, 0xf72ae48f, 0x8956c97c, + 0x0bd2bf69, 0x75ae929a, 0x0b3625b2, 0x754a0841, 0xf7ce7e54, 0x89b253a7, + 0x0aff1004, 0x74833df7, 0xf6074be2, 0x887b6611, 0xf6e3d139, 0x889ffcca, + 0x0a1b8adf, 0x7467a72c, 0x096d7b68, 0x7711569b, 0xf595208e, 0x8be90d7d, + 0xf571ba55, 0x8b0d97a6, 0x0989e1b3, 
0x77f5cc40, 0xf4b88fe3, 0x8ac4a210, + 0x0840d405, 0x763cf9f6, 0x08a44ede, 0x76d8632d, 0xf45c1538, 0x8a2038cb, + 0x0e49adb0, 0x70358043, 0xf2b1f656, 0x8ccddba5, 0xf2556c8d, 0x8c29417e, + 0x0ead376b, 0x70d11a98, 0xf39c593b, 0x8de074c8, 0x0f6402dd, 0x71182f2e, + 0x0f809806, 0x71fcb5f5, 0xf378c3e0, 0x8d04ee13, 0xf00e3257, 0x8e721fa4, + 0x0cf669b1, 0x728a4442, 0x0c12f36a, 0x726ede99, 0xf0eaa88c, 0x8e96857f, + 0x0ddbc6dc, 0x73a7eb2f, 0xf1239d3a, 0x8f5fb0c9, 0xf1c707e1, 0x8fbb2a12, + 0x0d3f5c07, 0x734371f4, 0xebb9bfef, 0x95c5921c, 0x1741e409, 0x693dc9fa, + 0x17a57ed2, 0x69d95321, 0xeb5d2534, 0x952108c7, 0x166c4b64, 0x68106697, + 0xea941082, 0x94e83d71, 0xea708a59, 0x940ca7aa, 0x1688d1bf, 0x68f4fc4c, + 0x15fe2008, 0x6b820dfb, 0xe9067bee, 0x977a561d, 0xe9e2e135, 0x979eccc6, + 0x151abad3, 0x6b669720, 0xe82bd483, 0x9657f970, 0x14d38f65, 0x6aafa296, + 0x143715be, 0x6a4b384d, 0xe8cf4e58, 0x96b363ab, 0x12daf6d0, 0x6ca6db23, + 0xee22ad36, 0x905e80c5, 0xeec637ed, 0x90ba1a1e, 0x123e6c0b, 0x6c4241f8, + 0xef0f025b, 0x91732fa8, 0x13f759bd, 0x6d8b744e, 0x1313c366, 0x6d6fee95, + 0xefeb9880, 0x9197b573, 0xec9d6937, 0x92e144c4, 0x106532d1, 0x6e191f22, + 0x1081a80a, 0x6efd85f9, 0xec79f3ec, 0x9205de1f, 0x11489dbc, 0x6f34b04f, + 0xedb0c65a, 0x93cceba9, 0xed545c81, 0x93287172, 0x11ac0767, 0x6fd02a94, + 0x1c935b60, 0x62ef7693, 0xe06b0086, 0x9e172d75, 0xe08f9a5d, 0x9ef3b7ae, + 0x1c77c1bb, 0x620bec48, 0xe146afeb, 0x9f3a8218, 0x1dbef40d, 0x63c2d9fe, + 0x1d5a6ed6, 0x63264325, 0xe1a23530, 0x9fde18c3, 0xe2d4c487, 0x9ca8e974, + 0x1e2c9f61, 0x6050b292, 0x1ec805ba, 0x60b42849, 0xe2305e5c, 0x9c4c73af, + 0x1f01300c, 0x617d1dff, 0xe3f96bea, 0x9d854619, 0xe31df131, 0x9d61dcc2, + 0x1fe5aad7, 0x61998724, 0xe5f0125f, 0x9b8c3fac, 0x190849b9, 0x6774644a, + 0x19ecd362, 0x6790fe91, 0xe5148884, 0x9b68a577, 0x1825e6d4, 0x6659cb27, + 0xe4ddbd32, 0x9aa190c1, 0xe43927e9, 0x9a450a1a, 0x18c17c0f, 0x66bd51fc, + 0x1bb78db8, 0x65cba04b, 0xe74fd65e, 0x9933fbad, 0xe7ab4c85, 0x99d76176, + 0x1b531763, 0x652f3a90, 0xe6627933, 0x981e54c0, 0x1a9a22d5, 0x64e60f26, + 0x1a7eb80e, 0x640295fd, 0xe686e3e8, 0x98face1b}, + + {0x00000000, 0xd29f092f, 0xa0d264af, 0x724d6d80, 0x4448bfaf, 0x96d7b680, + 0xe49adb00, 0x3605d22f, 0x88917f5e, 0x5a0e7671, 0x28431bf1, 0xfadc12de, + 0xccd9c0f1, 0x1e46c9de, 0x6c0ba45e, 0xbe94ad71, 0x14ce884d, 0xc6518162, + 0xb41cece2, 0x6683e5cd, 0x508637e2, 0x82193ecd, 0xf054534d, 0x22cb5a62, + 0x9c5ff713, 0x4ec0fe3c, 0x3c8d93bc, 0xee129a93, 0xd81748bc, 0x0a884193, + 0x78c52c13, 0xaa5a253c, 0x299d109a, 0xfb0219b5, 0x894f7435, 0x5bd07d1a, + 0x6dd5af35, 0xbf4aa61a, 0xcd07cb9a, 0x1f98c2b5, 0xa10c6fc4, 0x739366eb, + 0x01de0b6b, 0xd3410244, 0xe544d06b, 0x37dbd944, 0x4596b4c4, 0x9709bdeb, + 0x3d5398d7, 0xefcc91f8, 0x9d81fc78, 0x4f1ef557, 0x791b2778, 0xab842e57, + 0xd9c943d7, 0x0b564af8, 0xb5c2e789, 0x675deea6, 0x15108326, 0xc78f8a09, + 0xf18a5826, 0x23155109, 0x51583c89, 0x83c735a6, 0x533a2134, 0x81a5281b, + 0xf3e8459b, 0x21774cb4, 0x17729e9b, 0xc5ed97b4, 0xb7a0fa34, 0x653ff31b, + 0xdbab5e6a, 0x09345745, 0x7b793ac5, 0xa9e633ea, 0x9fe3e1c5, 0x4d7ce8ea, + 0x3f31856a, 0xedae8c45, 0x47f4a979, 0x956ba056, 0xe726cdd6, 0x35b9c4f9, + 0x03bc16d6, 0xd1231ff9, 0xa36e7279, 0x71f17b56, 0xcf65d627, 0x1dfadf08, + 0x6fb7b288, 0xbd28bba7, 0x8b2d6988, 0x59b260a7, 0x2bff0d27, 0xf9600408, + 0x7aa731ae, 0xa8383881, 0xda755501, 0x08ea5c2e, 0x3eef8e01, 0xec70872e, + 0x9e3deaae, 0x4ca2e381, 0xf2364ef0, 0x20a947df, 0x52e42a5f, 0x807b2370, + 0xb67ef15f, 0x64e1f870, 0x16ac95f0, 0xc4339cdf, 0x6e69b9e3, 0xbcf6b0cc, + 0xcebbdd4c, 0x1c24d463, 0x2a21064c, 0xf8be0f63, 
0x8af362e3, 0x586c6bcc, + 0xe6f8c6bd, 0x3467cf92, 0x462aa212, 0x94b5ab3d, 0xa2b07912, 0x702f703d, + 0x02621dbd, 0xd0fd1492, 0xa6744268, 0x74eb4b47, 0x06a626c7, 0xd4392fe8, + 0xe23cfdc7, 0x30a3f4e8, 0x42ee9968, 0x90719047, 0x2ee53d36, 0xfc7a3419, + 0x8e375999, 0x5ca850b6, 0x6aad8299, 0xb8328bb6, 0xca7fe636, 0x18e0ef19, + 0xb2baca25, 0x6025c30a, 0x1268ae8a, 0xc0f7a7a5, 0xf6f2758a, 0x246d7ca5, + 0x56201125, 0x84bf180a, 0x3a2bb57b, 0xe8b4bc54, 0x9af9d1d4, 0x4866d8fb, + 0x7e630ad4, 0xacfc03fb, 0xdeb16e7b, 0x0c2e6754, 0x8fe952f2, 0x5d765bdd, + 0x2f3b365d, 0xfda43f72, 0xcba1ed5d, 0x193ee472, 0x6b7389f2, 0xb9ec80dd, + 0x07782dac, 0xd5e72483, 0xa7aa4903, 0x7535402c, 0x43309203, 0x91af9b2c, + 0xe3e2f6ac, 0x317dff83, 0x9b27dabf, 0x49b8d390, 0x3bf5be10, 0xe96ab73f, + 0xdf6f6510, 0x0df06c3f, 0x7fbd01bf, 0xad220890, 0x13b6a5e1, 0xc129acce, + 0xb364c14e, 0x61fbc861, 0x57fe1a4e, 0x85611361, 0xf72c7ee1, 0x25b377ce, + 0xf54e635c, 0x27d16a73, 0x559c07f3, 0x87030edc, 0xb106dcf3, 0x6399d5dc, + 0x11d4b85c, 0xc34bb173, 0x7ddf1c02, 0xaf40152d, 0xdd0d78ad, 0x0f927182, + 0x3997a3ad, 0xeb08aa82, 0x9945c702, 0x4bdace2d, 0xe180eb11, 0x331fe23e, + 0x41528fbe, 0x93cd8691, 0xa5c854be, 0x77575d91, 0x051a3011, 0xd785393e, + 0x6911944f, 0xbb8e9d60, 0xc9c3f0e0, 0x1b5cf9cf, 0x2d592be0, 0xffc622cf, + 0x8d8b4f4f, 0x5f144660, 0xdcd373c6, 0x0e4c7ae9, 0x7c011769, 0xae9e1e46, + 0x989bcc69, 0x4a04c546, 0x3849a8c6, 0xead6a1e9, 0x54420c98, 0x86dd05b7, + 0xf4906837, 0x260f6118, 0x100ab337, 0xc295ba18, 0xb0d8d798, 0x6247deb7, + 0xc81dfb8b, 0x1a82f2a4, 0x68cf9f24, 0xba50960b, 0x8c554424, 0x5eca4d0b, + 0x2c87208b, 0xfe1829a4, 0x408c84d5, 0x92138dfa, 0xe05ee07a, 0x32c1e955, + 0x04c43b7a, 0xd65b3255, 0xa4165fd5, 0x768956fa}, + + {0x00000000, 0x4904f221, 0x9209e442, 0xdb0d1663, 0x21ffbe75, 0x68fb4c54, + 0xb3f65a37, 0xfaf2a816, 0x43ff7cea, 0x0afb8ecb, 0xd1f698a8, 0x98f26a89, + 0x6200c29f, 0x2b0430be, 0xf00926dd, 0xb90dd4fc, 0x87fef9d4, 0xcefa0bf5, + 0x15f71d96, 0x5cf3efb7, 0xa60147a1, 0xef05b580, 0x3408a3e3, 0x7d0c51c2, + 0xc401853e, 0x8d05771f, 0x5608617c, 0x1f0c935d, 0xe5fe3b4b, 0xacfac96a, + 0x77f7df09, 0x3ef32d28, 0x0a118559, 0x43157778, 0x9818611b, 0xd11c933a, + 0x2bee3b2c, 0x62eac90d, 0xb9e7df6e, 0xf0e32d4f, 0x49eef9b3, 0x00ea0b92, + 0xdbe71df1, 0x92e3efd0, 0x681147c6, 0x2115b5e7, 0xfa18a384, 0xb31c51a5, + 0x8def7c8d, 0xc4eb8eac, 0x1fe698cf, 0x56e26aee, 0xac10c2f8, 0xe51430d9, + 0x3e1926ba, 0x771dd49b, 0xce100067, 0x8714f246, 0x5c19e425, 0x151d1604, + 0xefefbe12, 0xa6eb4c33, 0x7de65a50, 0x34e2a871, 0x14230ab2, 0x5d27f893, + 0x862aeef0, 0xcf2e1cd1, 0x35dcb4c7, 0x7cd846e6, 0xa7d55085, 0xeed1a2a4, + 0x57dc7658, 0x1ed88479, 0xc5d5921a, 0x8cd1603b, 0x7623c82d, 0x3f273a0c, + 0xe42a2c6f, 0xad2ede4e, 0x93ddf366, 0xdad90147, 0x01d41724, 0x48d0e505, + 0xb2224d13, 0xfb26bf32, 0x202ba951, 0x692f5b70, 0xd0228f8c, 0x99267dad, + 0x422b6bce, 0x0b2f99ef, 0xf1dd31f9, 0xb8d9c3d8, 0x63d4d5bb, 0x2ad0279a, + 0x1e328feb, 0x57367dca, 0x8c3b6ba9, 0xc53f9988, 0x3fcd319e, 0x76c9c3bf, + 0xadc4d5dc, 0xe4c027fd, 0x5dcdf301, 0x14c90120, 0xcfc41743, 0x86c0e562, + 0x7c324d74, 0x3536bf55, 0xee3ba936, 0xa73f5b17, 0x99cc763f, 0xd0c8841e, + 0x0bc5927d, 0x42c1605c, 0xb833c84a, 0xf1373a6b, 0x2a3a2c08, 0x633ede29, + 0xda330ad5, 0x9337f8f4, 0x483aee97, 0x013e1cb6, 0xfbccb4a0, 0xb2c84681, + 0x69c550e2, 0x20c1a2c3, 0x28461564, 0x6142e745, 0xba4ff126, 0xf34b0307, + 0x09b9ab11, 0x40bd5930, 0x9bb04f53, 0xd2b4bd72, 0x6bb9698e, 0x22bd9baf, + 0xf9b08dcc, 0xb0b47fed, 0x4a46d7fb, 0x034225da, 0xd84f33b9, 0x914bc198, + 0xafb8ecb0, 0xe6bc1e91, 0x3db108f2, 0x74b5fad3, 0x8e4752c5, 
0xc743a0e4, + 0x1c4eb687, 0x554a44a6, 0xec47905a, 0xa543627b, 0x7e4e7418, 0x374a8639, + 0xcdb82e2f, 0x84bcdc0e, 0x5fb1ca6d, 0x16b5384c, 0x2257903d, 0x6b53621c, + 0xb05e747f, 0xf95a865e, 0x03a82e48, 0x4aacdc69, 0x91a1ca0a, 0xd8a5382b, + 0x61a8ecd7, 0x28ac1ef6, 0xf3a10895, 0xbaa5fab4, 0x405752a2, 0x0953a083, + 0xd25eb6e0, 0x9b5a44c1, 0xa5a969e9, 0xecad9bc8, 0x37a08dab, 0x7ea47f8a, + 0x8456d79c, 0xcd5225bd, 0x165f33de, 0x5f5bc1ff, 0xe6561503, 0xaf52e722, + 0x745ff141, 0x3d5b0360, 0xc7a9ab76, 0x8ead5957, 0x55a04f34, 0x1ca4bd15, + 0x3c651fd6, 0x7561edf7, 0xae6cfb94, 0xe76809b5, 0x1d9aa1a3, 0x549e5382, + 0x8f9345e1, 0xc697b7c0, 0x7f9a633c, 0x369e911d, 0xed93877e, 0xa497755f, + 0x5e65dd49, 0x17612f68, 0xcc6c390b, 0x8568cb2a, 0xbb9be602, 0xf29f1423, + 0x29920240, 0x6096f061, 0x9a645877, 0xd360aa56, 0x086dbc35, 0x41694e14, + 0xf8649ae8, 0xb16068c9, 0x6a6d7eaa, 0x23698c8b, 0xd99b249d, 0x909fd6bc, + 0x4b92c0df, 0x029632fe, 0x36749a8f, 0x7f7068ae, 0xa47d7ecd, 0xed798cec, + 0x178b24fa, 0x5e8fd6db, 0x8582c0b8, 0xcc863299, 0x758be665, 0x3c8f1444, + 0xe7820227, 0xae86f006, 0x54745810, 0x1d70aa31, 0xc67dbc52, 0x8f794e73, + 0xb18a635b, 0xf88e917a, 0x23838719, 0x6a877538, 0x9075dd2e, 0xd9712f0f, + 0x027c396c, 0x4b78cb4d, 0xf2751fb1, 0xbb71ed90, 0x607cfbf3, 0x297809d2, + 0xd38aa1c4, 0x9a8e53e5, 0x41834586, 0x0887b7a7}}; + +static uint32_t short_shifts[4][256] = { + {0x00000000, 0xdcb17aa4, 0xbc8e83b9, 0x603ff91d, 0x7cf17183, 0xa0400b27, + 0xc07ff23a, 0x1cce889e, 0xf9e2e306, 0x255399a2, 0x456c60bf, 0x99dd1a1b, + 0x85139285, 0x59a2e821, 0x399d113c, 0xe52c6b98, 0xf629b0fd, 0x2a98ca59, + 0x4aa73344, 0x961649e0, 0x8ad8c17e, 0x5669bbda, 0x365642c7, 0xeae73863, + 0x0fcb53fb, 0xd37a295f, 0xb345d042, 0x6ff4aae6, 0x733a2278, 0xaf8b58dc, + 0xcfb4a1c1, 0x1305db65, 0xe9bf170b, 0x350e6daf, 0x553194b2, 0x8980ee16, + 0x954e6688, 0x49ff1c2c, 0x29c0e531, 0xf5719f95, 0x105df40d, 0xccec8ea9, + 0xacd377b4, 0x70620d10, 0x6cac858e, 0xb01dff2a, 0xd0220637, 0x0c937c93, + 0x1f96a7f6, 0xc327dd52, 0xa318244f, 0x7fa95eeb, 0x6367d675, 0xbfd6acd1, + 0xdfe955cc, 0x03582f68, 0xe67444f0, 0x3ac53e54, 0x5afac749, 0x864bbded, + 0x9a853573, 0x46344fd7, 0x260bb6ca, 0xfabacc6e, 0xd69258e7, 0x0a232243, + 0x6a1cdb5e, 0xb6ada1fa, 0xaa632964, 0x76d253c0, 0x16edaadd, 0xca5cd079, + 0x2f70bbe1, 0xf3c1c145, 0x93fe3858, 0x4f4f42fc, 0x5381ca62, 0x8f30b0c6, + 0xef0f49db, 0x33be337f, 0x20bbe81a, 0xfc0a92be, 0x9c356ba3, 0x40841107, + 0x5c4a9999, 0x80fbe33d, 0xe0c41a20, 0x3c756084, 0xd9590b1c, 0x05e871b8, + 0x65d788a5, 0xb966f201, 0xa5a87a9f, 0x7919003b, 0x1926f926, 0xc5978382, + 0x3f2d4fec, 0xe39c3548, 0x83a3cc55, 0x5f12b6f1, 0x43dc3e6f, 0x9f6d44cb, + 0xff52bdd6, 0x23e3c772, 0xc6cfacea, 0x1a7ed64e, 0x7a412f53, 0xa6f055f7, + 0xba3edd69, 0x668fa7cd, 0x06b05ed0, 0xda012474, 0xc904ff11, 0x15b585b5, + 0x758a7ca8, 0xa93b060c, 0xb5f58e92, 0x6944f436, 0x097b0d2b, 0xd5ca778f, + 0x30e61c17, 0xec5766b3, 0x8c689fae, 0x50d9e50a, 0x4c176d94, 0x90a61730, + 0xf099ee2d, 0x2c289489, 0xa8c8c73f, 0x7479bd9b, 0x14464486, 0xc8f73e22, + 0xd439b6bc, 0x0888cc18, 0x68b73505, 0xb4064fa1, 0x512a2439, 0x8d9b5e9d, + 0xeda4a780, 0x3115dd24, 0x2ddb55ba, 0xf16a2f1e, 0x9155d603, 0x4de4aca7, + 0x5ee177c2, 0x82500d66, 0xe26ff47b, 0x3ede8edf, 0x22100641, 0xfea17ce5, + 0x9e9e85f8, 0x422fff5c, 0xa70394c4, 0x7bb2ee60, 0x1b8d177d, 0xc73c6dd9, + 0xdbf2e547, 0x07439fe3, 0x677c66fe, 0xbbcd1c5a, 0x4177d034, 0x9dc6aa90, + 0xfdf9538d, 0x21482929, 0x3d86a1b7, 0xe137db13, 0x8108220e, 0x5db958aa, + 0xb8953332, 0x64244996, 0x041bb08b, 0xd8aaca2f, 0xc46442b1, 0x18d53815, + 0x78eac108, 0xa45bbbac, 0xb75e60c9, 
0x6bef1a6d, 0x0bd0e370, 0xd76199d4, + 0xcbaf114a, 0x171e6bee, 0x772192f3, 0xab90e857, 0x4ebc83cf, 0x920df96b, + 0xf2320076, 0x2e837ad2, 0x324df24c, 0xeefc88e8, 0x8ec371f5, 0x52720b51, + 0x7e5a9fd8, 0xa2ebe57c, 0xc2d41c61, 0x1e6566c5, 0x02abee5b, 0xde1a94ff, + 0xbe256de2, 0x62941746, 0x87b87cde, 0x5b09067a, 0x3b36ff67, 0xe78785c3, + 0xfb490d5d, 0x27f877f9, 0x47c78ee4, 0x9b76f440, 0x88732f25, 0x54c25581, + 0x34fdac9c, 0xe84cd638, 0xf4825ea6, 0x28332402, 0x480cdd1f, 0x94bda7bb, + 0x7191cc23, 0xad20b687, 0xcd1f4f9a, 0x11ae353e, 0x0d60bda0, 0xd1d1c704, + 0xb1ee3e19, 0x6d5f44bd, 0x97e588d3, 0x4b54f277, 0x2b6b0b6a, 0xf7da71ce, + 0xeb14f950, 0x37a583f4, 0x579a7ae9, 0x8b2b004d, 0x6e076bd5, 0xb2b61171, + 0xd289e86c, 0x0e3892c8, 0x12f61a56, 0xce4760f2, 0xae7899ef, 0x72c9e34b, + 0x61cc382e, 0xbd7d428a, 0xdd42bb97, 0x01f3c133, 0x1d3d49ad, 0xc18c3309, + 0xa1b3ca14, 0x7d02b0b0, 0x982edb28, 0x449fa18c, 0x24a05891, 0xf8112235, + 0xe4dfaaab, 0x386ed00f, 0x58512912, 0x84e053b6}, + + {0x00000000, 0x547df88f, 0xa8fbf11e, 0xfc860991, 0x541b94cd, 0x00666c42, + 0xfce065d3, 0xa89d9d5c, 0xa837299a, 0xfc4ad115, 0x00ccd884, 0x54b1200b, + 0xfc2cbd57, 0xa85145d8, 0x54d74c49, 0x00aab4c6, 0x558225c5, 0x01ffdd4a, + 0xfd79d4db, 0xa9042c54, 0x0199b108, 0x55e44987, 0xa9624016, 0xfd1fb899, + 0xfdb50c5f, 0xa9c8f4d0, 0x554efd41, 0x013305ce, 0xa9ae9892, 0xfdd3601d, + 0x0155698c, 0x55289103, 0xab044b8a, 0xff79b305, 0x03ffba94, 0x5782421b, + 0xff1fdf47, 0xab6227c8, 0x57e42e59, 0x0399d6d6, 0x03336210, 0x574e9a9f, + 0xabc8930e, 0xffb56b81, 0x5728f6dd, 0x03550e52, 0xffd307c3, 0xabaeff4c, + 0xfe866e4f, 0xaafb96c0, 0x567d9f51, 0x020067de, 0xaa9dfa82, 0xfee0020d, + 0x02660b9c, 0x561bf313, 0x56b147d5, 0x02ccbf5a, 0xfe4ab6cb, 0xaa374e44, + 0x02aad318, 0x56d72b97, 0xaa512206, 0xfe2cda89, 0x53e4e1e5, 0x0799196a, + 0xfb1f10fb, 0xaf62e874, 0x07ff7528, 0x53828da7, 0xaf048436, 0xfb797cb9, + 0xfbd3c87f, 0xafae30f0, 0x53283961, 0x0755c1ee, 0xafc85cb2, 0xfbb5a43d, + 0x0733adac, 0x534e5523, 0x0666c420, 0x521b3caf, 0xae9d353e, 0xfae0cdb1, + 0x527d50ed, 0x0600a862, 0xfa86a1f3, 0xaefb597c, 0xae51edba, 0xfa2c1535, + 0x06aa1ca4, 0x52d7e42b, 0xfa4a7977, 0xae3781f8, 0x52b18869, 0x06cc70e6, + 0xf8e0aa6f, 0xac9d52e0, 0x501b5b71, 0x0466a3fe, 0xacfb3ea2, 0xf886c62d, + 0x0400cfbc, 0x507d3733, 0x50d783f5, 0x04aa7b7a, 0xf82c72eb, 0xac518a64, + 0x04cc1738, 0x50b1efb7, 0xac37e626, 0xf84a1ea9, 0xad628faa, 0xf91f7725, + 0x05997eb4, 0x51e4863b, 0xf9791b67, 0xad04e3e8, 0x5182ea79, 0x05ff12f6, + 0x0555a630, 0x51285ebf, 0xadae572e, 0xf9d3afa1, 0x514e32fd, 0x0533ca72, + 0xf9b5c3e3, 0xadc83b6c, 0xa7c9c3ca, 0xf3b43b45, 0x0f3232d4, 0x5b4fca5b, + 0xf3d25707, 0xa7afaf88, 0x5b29a619, 0x0f545e96, 0x0ffeea50, 0x5b8312df, + 0xa7051b4e, 0xf378e3c1, 0x5be57e9d, 0x0f988612, 0xf31e8f83, 0xa763770c, + 0xf24be60f, 0xa6361e80, 0x5ab01711, 0x0ecdef9e, 0xa65072c2, 0xf22d8a4d, + 0x0eab83dc, 0x5ad67b53, 0x5a7ccf95, 0x0e01371a, 0xf2873e8b, 0xa6fac604, + 0x0e675b58, 0x5a1aa3d7, 0xa69caa46, 0xf2e152c9, 0x0ccd8840, 0x58b070cf, + 0xa436795e, 0xf04b81d1, 0x58d61c8d, 0x0cabe402, 0xf02ded93, 0xa450151c, + 0xa4faa1da, 0xf0875955, 0x0c0150c4, 0x587ca84b, 0xf0e13517, 0xa49ccd98, + 0x581ac409, 0x0c673c86, 0x594fad85, 0x0d32550a, 0xf1b45c9b, 0xa5c9a414, + 0x0d543948, 0x5929c1c7, 0xa5afc856, 0xf1d230d9, 0xf178841f, 0xa5057c90, + 0x59837501, 0x0dfe8d8e, 0xa56310d2, 0xf11ee85d, 0x0d98e1cc, 0x59e51943, + 0xf42d222f, 0xa050daa0, 0x5cd6d331, 0x08ab2bbe, 0xa036b6e2, 0xf44b4e6d, + 0x08cd47fc, 0x5cb0bf73, 0x5c1a0bb5, 0x0867f33a, 0xf4e1faab, 0xa09c0224, + 0x08019f78, 0x5c7c67f7, 0xa0fa6e66, 0xf48796e9, 
0xa1af07ea, 0xf5d2ff65, + 0x0954f6f4, 0x5d290e7b, 0xf5b49327, 0xa1c96ba8, 0x5d4f6239, 0x09329ab6, + 0x09982e70, 0x5de5d6ff, 0xa163df6e, 0xf51e27e1, 0x5d83babd, 0x09fe4232, + 0xf5784ba3, 0xa105b32c, 0x5f2969a5, 0x0b54912a, 0xf7d298bb, 0xa3af6034, + 0x0b32fd68, 0x5f4f05e7, 0xa3c90c76, 0xf7b4f4f9, 0xf71e403f, 0xa363b8b0, + 0x5fe5b121, 0x0b9849ae, 0xa305d4f2, 0xf7782c7d, 0x0bfe25ec, 0x5f83dd63, + 0x0aab4c60, 0x5ed6b4ef, 0xa250bd7e, 0xf62d45f1, 0x5eb0d8ad, 0x0acd2022, + 0xf64b29b3, 0xa236d13c, 0xa29c65fa, 0xf6e19d75, 0x0a6794e4, 0x5e1a6c6b, + 0xf687f137, 0xa2fa09b8, 0x5e7c0029, 0x0a01f8a6}, + + {0x00000000, 0x4a7ff165, 0x94ffe2ca, 0xde8013af, 0x2c13b365, 0x666c4200, + 0xb8ec51af, 0xf293a0ca, 0x582766ca, 0x125897af, 0xccd88400, 0x86a77565, + 0x7434d5af, 0x3e4b24ca, 0xe0cb3765, 0xaab4c600, 0xb04ecd94, 0xfa313cf1, + 0x24b12f5e, 0x6ecede3b, 0x9c5d7ef1, 0xd6228f94, 0x08a29c3b, 0x42dd6d5e, + 0xe869ab5e, 0xa2165a3b, 0x7c964994, 0x36e9b8f1, 0xc47a183b, 0x8e05e95e, + 0x5085faf1, 0x1afa0b94, 0x6571edd9, 0x2f0e1cbc, 0xf18e0f13, 0xbbf1fe76, + 0x49625ebc, 0x031dafd9, 0xdd9dbc76, 0x97e24d13, 0x3d568b13, 0x77297a76, + 0xa9a969d9, 0xe3d698bc, 0x11453876, 0x5b3ac913, 0x85badabc, 0xcfc52bd9, + 0xd53f204d, 0x9f40d128, 0x41c0c287, 0x0bbf33e2, 0xf92c9328, 0xb353624d, + 0x6dd371e2, 0x27ac8087, 0x8d184687, 0xc767b7e2, 0x19e7a44d, 0x53985528, + 0xa10bf5e2, 0xeb740487, 0x35f41728, 0x7f8be64d, 0xcae3dbb2, 0x809c2ad7, + 0x5e1c3978, 0x1463c81d, 0xe6f068d7, 0xac8f99b2, 0x720f8a1d, 0x38707b78, + 0x92c4bd78, 0xd8bb4c1d, 0x063b5fb2, 0x4c44aed7, 0xbed70e1d, 0xf4a8ff78, + 0x2a28ecd7, 0x60571db2, 0x7aad1626, 0x30d2e743, 0xee52f4ec, 0xa42d0589, + 0x56bea543, 0x1cc15426, 0xc2414789, 0x883eb6ec, 0x228a70ec, 0x68f58189, + 0xb6759226, 0xfc0a6343, 0x0e99c389, 0x44e632ec, 0x9a662143, 0xd019d026, + 0xaf92366b, 0xe5edc70e, 0x3b6dd4a1, 0x711225c4, 0x8381850e, 0xc9fe746b, + 0x177e67c4, 0x5d0196a1, 0xf7b550a1, 0xbdcaa1c4, 0x634ab26b, 0x2935430e, + 0xdba6e3c4, 0x91d912a1, 0x4f59010e, 0x0526f06b, 0x1fdcfbff, 0x55a30a9a, + 0x8b231935, 0xc15ce850, 0x33cf489a, 0x79b0b9ff, 0xa730aa50, 0xed4f5b35, + 0x47fb9d35, 0x0d846c50, 0xd3047fff, 0x997b8e9a, 0x6be82e50, 0x2197df35, + 0xff17cc9a, 0xb5683dff, 0x902bc195, 0xda5430f0, 0x04d4235f, 0x4eabd23a, + 0xbc3872f0, 0xf6478395, 0x28c7903a, 0x62b8615f, 0xc80ca75f, 0x8273563a, + 0x5cf34595, 0x168cb4f0, 0xe41f143a, 0xae60e55f, 0x70e0f6f0, 0x3a9f0795, + 0x20650c01, 0x6a1afd64, 0xb49aeecb, 0xfee51fae, 0x0c76bf64, 0x46094e01, + 0x98895dae, 0xd2f6accb, 0x78426acb, 0x323d9bae, 0xecbd8801, 0xa6c27964, + 0x5451d9ae, 0x1e2e28cb, 0xc0ae3b64, 0x8ad1ca01, 0xf55a2c4c, 0xbf25dd29, + 0x61a5ce86, 0x2bda3fe3, 0xd9499f29, 0x93366e4c, 0x4db67de3, 0x07c98c86, + 0xad7d4a86, 0xe702bbe3, 0x3982a84c, 0x73fd5929, 0x816ef9e3, 0xcb110886, + 0x15911b29, 0x5feeea4c, 0x4514e1d8, 0x0f6b10bd, 0xd1eb0312, 0x9b94f277, + 0x690752bd, 0x2378a3d8, 0xfdf8b077, 0xb7874112, 0x1d338712, 0x574c7677, + 0x89cc65d8, 0xc3b394bd, 0x31203477, 0x7b5fc512, 0xa5dfd6bd, 0xefa027d8, + 0x5ac81a27, 0x10b7eb42, 0xce37f8ed, 0x84480988, 0x76dba942, 0x3ca45827, + 0xe2244b88, 0xa85bbaed, 0x02ef7ced, 0x48908d88, 0x96109e27, 0xdc6f6f42, + 0x2efccf88, 0x64833eed, 0xba032d42, 0xf07cdc27, 0xea86d7b3, 0xa0f926d6, + 0x7e793579, 0x3406c41c, 0xc69564d6, 0x8cea95b3, 0x526a861c, 0x18157779, + 0xb2a1b179, 0xf8de401c, 0x265e53b3, 0x6c21a2d6, 0x9eb2021c, 0xd4cdf379, + 0x0a4de0d6, 0x403211b3, 0x3fb9f7fe, 0x75c6069b, 0xab461534, 0xe139e451, + 0x13aa449b, 0x59d5b5fe, 0x8755a651, 0xcd2a5734, 0x679e9134, 0x2de16051, + 0xf36173fe, 0xb91e829b, 0x4b8d2251, 0x01f2d334, 0xdf72c09b, 
0x950d31fe, + 0x8ff73a6a, 0xc588cb0f, 0x1b08d8a0, 0x517729c5, 0xa3e4890f, 0xe99b786a, + 0x371b6bc5, 0x7d649aa0, 0xd7d05ca0, 0x9dafadc5, 0x432fbe6a, 0x09504f0f, + 0xfbc3efc5, 0xb1bc1ea0, 0x6f3c0d0f, 0x2543fc6a}, + + {0x00000000, 0x25bbf5db, 0x4b77ebb6, 0x6ecc1e6d, 0x96efd76c, 0xb35422b7, + 0xdd983cda, 0xf823c901, 0x2833d829, 0x0d882df2, 0x6344339f, 0x46ffc644, + 0xbedc0f45, 0x9b67fa9e, 0xf5abe4f3, 0xd0101128, 0x5067b052, 0x75dc4589, + 0x1b105be4, 0x3eabae3f, 0xc688673e, 0xe33392e5, 0x8dff8c88, 0xa8447953, + 0x7854687b, 0x5def9da0, 0x332383cd, 0x16987616, 0xeebbbf17, 0xcb004acc, + 0xa5cc54a1, 0x8077a17a, 0xa0cf60a4, 0x8574957f, 0xebb88b12, 0xce037ec9, + 0x3620b7c8, 0x139b4213, 0x7d575c7e, 0x58eca9a5, 0x88fcb88d, 0xad474d56, + 0xc38b533b, 0xe630a6e0, 0x1e136fe1, 0x3ba89a3a, 0x55648457, 0x70df718c, + 0xf0a8d0f6, 0xd513252d, 0xbbdf3b40, 0x9e64ce9b, 0x6647079a, 0x43fcf241, + 0x2d30ec2c, 0x088b19f7, 0xd89b08df, 0xfd20fd04, 0x93ece369, 0xb65716b2, + 0x4e74dfb3, 0x6bcf2a68, 0x05033405, 0x20b8c1de, 0x4472b7b9, 0x61c94262, + 0x0f055c0f, 0x2abea9d4, 0xd29d60d5, 0xf726950e, 0x99ea8b63, 0xbc517eb8, + 0x6c416f90, 0x49fa9a4b, 0x27368426, 0x028d71fd, 0xfaaeb8fc, 0xdf154d27, + 0xb1d9534a, 0x9462a691, 0x141507eb, 0x31aef230, 0x5f62ec5d, 0x7ad91986, + 0x82fad087, 0xa741255c, 0xc98d3b31, 0xec36ceea, 0x3c26dfc2, 0x199d2a19, + 0x77513474, 0x52eac1af, 0xaac908ae, 0x8f72fd75, 0xe1bee318, 0xc40516c3, + 0xe4bdd71d, 0xc10622c6, 0xafca3cab, 0x8a71c970, 0x72520071, 0x57e9f5aa, + 0x3925ebc7, 0x1c9e1e1c, 0xcc8e0f34, 0xe935faef, 0x87f9e482, 0xa2421159, + 0x5a61d858, 0x7fda2d83, 0x111633ee, 0x34adc635, 0xb4da674f, 0x91619294, + 0xffad8cf9, 0xda167922, 0x2235b023, 0x078e45f8, 0x69425b95, 0x4cf9ae4e, + 0x9ce9bf66, 0xb9524abd, 0xd79e54d0, 0xf225a10b, 0x0a06680a, 0x2fbd9dd1, + 0x417183bc, 0x64ca7667, 0x88e56f72, 0xad5e9aa9, 0xc39284c4, 0xe629711f, + 0x1e0ab81e, 0x3bb14dc5, 0x557d53a8, 0x70c6a673, 0xa0d6b75b, 0x856d4280, + 0xeba15ced, 0xce1aa936, 0x36396037, 0x138295ec, 0x7d4e8b81, 0x58f57e5a, + 0xd882df20, 0xfd392afb, 0x93f53496, 0xb64ec14d, 0x4e6d084c, 0x6bd6fd97, + 0x051ae3fa, 0x20a11621, 0xf0b10709, 0xd50af2d2, 0xbbc6ecbf, 0x9e7d1964, + 0x665ed065, 0x43e525be, 0x2d293bd3, 0x0892ce08, 0x282a0fd6, 0x0d91fa0d, + 0x635de460, 0x46e611bb, 0xbec5d8ba, 0x9b7e2d61, 0xf5b2330c, 0xd009c6d7, + 0x0019d7ff, 0x25a22224, 0x4b6e3c49, 0x6ed5c992, 0x96f60093, 0xb34df548, + 0xdd81eb25, 0xf83a1efe, 0x784dbf84, 0x5df64a5f, 0x333a5432, 0x1681a1e9, + 0xeea268e8, 0xcb199d33, 0xa5d5835e, 0x806e7685, 0x507e67ad, 0x75c59276, + 0x1b098c1b, 0x3eb279c0, 0xc691b0c1, 0xe32a451a, 0x8de65b77, 0xa85daeac, + 0xcc97d8cb, 0xe92c2d10, 0x87e0337d, 0xa25bc6a6, 0x5a780fa7, 0x7fc3fa7c, + 0x110fe411, 0x34b411ca, 0xe4a400e2, 0xc11ff539, 0xafd3eb54, 0x8a681e8f, + 0x724bd78e, 0x57f02255, 0x393c3c38, 0x1c87c9e3, 0x9cf06899, 0xb94b9d42, + 0xd787832f, 0xf23c76f4, 0x0a1fbff5, 0x2fa44a2e, 0x41685443, 0x64d3a198, + 0xb4c3b0b0, 0x9178456b, 0xffb45b06, 0xda0faedd, 0x222c67dc, 0x07979207, + 0x695b8c6a, 0x4ce079b1, 0x6c58b86f, 0x49e34db4, 0x272f53d9, 0x0294a602, + 0xfab76f03, 0xdf0c9ad8, 0xb1c084b5, 0x947b716e, 0x446b6046, 0x61d0959d, + 0x0f1c8bf0, 0x2aa77e2b, 0xd284b72a, 0xf73f42f1, 0x99f35c9c, 0xbc48a947, + 0x3c3f083d, 0x1984fde6, 0x7748e38b, 0x52f31650, 0xaad0df51, 0x8f6b2a8a, + 0xe1a734e7, 0xc41cc13c, 0x140cd014, 0x31b725cf, 0x5f7b3ba2, 0x7ac0ce79, + 0x82e30778, 0xa758f2a3, 0xc994ecce, 0xec2f1915}}; + +static uint32_t append_trivial(uint32_t crc, crc_stream input, size_t length) { + for (size_t i = 0; i < length; ++i) { + crc = crc ^ input[i]; + for (int j = 0; j < 8; j++) + 
crc = (crc >> 1) ^ 0x80000000 ^ ((~crc & 1) * POLY); + } + return crc; +} + +/* Table-driven software version as a fall-back. This is about 15 times slower + than using the hardware instructions. This assumes little-endian integers, + as is the case on Intel processors that the assembler code here is for. */ +static uint32_t append_adler_table(uint32_t crci, crc_stream input, + size_t length) { + crc_stream next = input; + uint64_t crc; + + crc = crci ^ 0xffffffff; + while (length && ((uintptr_t)next & 7) != 0) { + crc = table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8); + --length; + } + while (length >= 8) { + crc ^= *(uint64_t *)next; + crc = table[7][crc & 0xff] ^ table[6][(crc >> 8) & 0xff] ^ + table[5][(crc >> 16) & 0xff] ^ table[4][(crc >> 24) & 0xff] ^ + table[3][(crc >> 32) & 0xff] ^ table[2][(crc >> 40) & 0xff] ^ + table[1][(crc >> 48) & 0xff] ^ table[0][crc >> 56]; + next += 8; + length -= 8; + } + while (length) { + crc = table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8); + --length; + } + return (uint32_t)(crc ^ 0xffffffff); +} + +/* Table-driven software version as a fall-back. This is about 15 times slower + than using the hardware instructions. This assumes little-endian integers, + as is the case on Intel processors that the assembler code here is for. */ +uint32_t crc32c_sf(uint32_t crci, crc_stream input, size_t length) { + crc_stream next = input; +#ifdef _M_X64 + uint64_t crc; +#else + uint32_t crc; +#endif + + crc = crci ^ 0xffffffff; +#ifdef _M_X64 + while (length && ((uintptr_t)next & 7) != 0) { + crc = table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8); + --length; + } + while (length >= 16) { + crc ^= *(uint64_t *)next; + uint64_t high = *(uint64_t *)(next + 8); + crc = table[15][crc & 0xff] ^ table[14][(crc >> 8) & 0xff] ^ + table[13][(crc >> 16) & 0xff] ^ table[12][(crc >> 24) & 0xff] ^ + table[11][(crc >> 32) & 0xff] ^ table[10][(crc >> 40) & 0xff] ^ + table[9][(crc >> 48) & 0xff] ^ table[8][crc >> 56] ^ + table[7][high & 0xff] ^ table[6][(high >> 8) & 0xff] ^ + table[5][(high >> 16) & 0xff] ^ table[4][(high >> 24) & 0xff] ^ + table[3][(high >> 32) & 0xff] ^ table[2][(high >> 40) & 0xff] ^ + table[1][(high >> 48) & 0xff] ^ table[0][high >> 56]; + next += 16; + length -= 16; + } +#else + while (length && ((uintptr_t)next & 3) != 0) { + crc = table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8); + --length; + } + while (length >= 12) { + crc ^= *(uint32_t *)next; + uint32_t high = *(uint32_t *)(next + 4); + uint32_t high2 = *(uint32_t *)(next + 8); + crc = table[11][crc & 0xff] ^ table[10][(crc >> 8) & 0xff] ^ + table[9][(crc >> 16) & 0xff] ^ table[8][crc >> 24] ^ + table[7][high & 0xff] ^ table[6][(high >> 8) & 0xff] ^ + table[5][(high >> 16) & 0xff] ^ table[4][high >> 24] ^ + table[3][high2 & 0xff] ^ table[2][(high2 >> 8) & 0xff] ^ + table[1][(high2 >> 16) & 0xff] ^ table[0][high2 >> 24]; + next += 12; + length -= 12; + } +#endif + while (length) { + crc = table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8); + --length; + } + return (uint32_t)crc ^ 0xffffffff; +} + +/* Apply the zeros operator table to crc. */ +static inline uint32_t shift_crc(uint32_t shift_table[][256], uint32_t crc) { + return shift_table[0][crc & 0xff] ^ shift_table[1][(crc >> 8) & 0xff] ^ + shift_table[2][(crc >> 16) & 0xff] ^ shift_table[3][crc >> 24]; +} + +/* Compute CRC-32C using the Intel hardware instruction. 
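+   In outline: after aligning the pointer to an eight-byte boundary, the buffer is
+   consumed in blocks of 3 * LONG_SHIFT (then 3 * SHORT_SHIFT) bytes as three
+   interleaved streams, each fed to its own crc32 instruction so the instruction's
+   three-cycle latency is hidden by its one-per-cycle throughput; the per-stream
+   results are then recombined by applying the precomputed zeros-operator tables
+   long_shifts/short_shifts via shift_crc(), and any trailing bytes are folded in
+   one byte at a time.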
*/ +uint32_t crc32c_hw(uint32_t crc, crc_stream buf, size_t len) { + crc_stream next = buf; + crc_stream end; +#ifdef _M_X64 + uint64_t crc0, crc1, crc2; /* need to be 64 bits for crc32q */ +#else + uint32_t crc0, crc1, crc2; +#endif + + /* pre-process the crc */ + crc0 = crc ^ 0xffffffff; + + /* compute the crc for up to seven leading bytes to bring the data pointer + to an eight-byte boundary */ + while (len && ((uintptr_t)next & 7) != 0) { + crc0 = _mm_crc32_u8((uint32_t)(crc0), *next); + ++next; + --len; + } + +#ifdef _M_X64 + /* compute the crc on sets of LONG_SHIFT*3 bytes, executing three independent + crc + instructions, each on LONG_SHIFT bytes -- this is optimized for the + Nehalem, + Westmere, Sandy Bridge, and Ivy Bridge architectures, which have a + throughput of one crc per cycle, but a latency of three cycles */ + while (len >= 3 * LONG_SHIFT) { + crc1 = 0; + crc2 = 0; + end = next + LONG_SHIFT; + do { + crc0 = _mm_crc32_u64(crc0, *(uint64_t *)(next)); + crc1 = _mm_crc32_u64(crc1, *(uint64_t *)(next + LONG_SHIFT)); + crc2 = _mm_crc32_u64(crc2, *(uint64_t *)(next + 2 * LONG_SHIFT)); + next += 8; + } while (next < end); + crc0 = shift_crc(long_shifts, (uint32_t)(crc0)) ^ crc1; + crc0 = shift_crc(long_shifts, (uint32_t)(crc0)) ^ crc2; + next += 2 * LONG_SHIFT; + len -= 3 * LONG_SHIFT; + } + + /* do the same thing, but now on SHORT_SHIFT*3 blocks for the remaining data + less + than a LONG_SHIFT*3 block */ + while (len >= 3 * SHORT_SHIFT) { + crc1 = 0; + crc2 = 0; + end = next + SHORT_SHIFT; + do { + crc0 = _mm_crc32_u64(crc0, *(uint64_t *)(next)); + crc1 = _mm_crc32_u64(crc1, *(uint64_t *)(next + SHORT_SHIFT)); + crc2 = _mm_crc32_u64(crc2, *(uint64_t *)(next + 2 * SHORT_SHIFT)); + next += 8; + } while (next < end); + crc0 = shift_crc(short_shifts, (uint32_t)(crc0)) ^ crc1; + crc0 = shift_crc(short_shifts, (uint32_t)(crc0)) ^ crc2; + next += 2 * SHORT_SHIFT; + len -= 3 * SHORT_SHIFT; + } + + /* compute the crc on the remaining eight-byte units less than a SHORT_SHIFT*3 + * block */ + end = next + (len - (len & 7)); + while (next < end) { + crc0 = _mm_crc32_u64(crc0, *(uint64_t *)(next)); + next += 8; + } +#else + /* compute the crc on sets of LONG_SHIFT*3 bytes, executing three independent + crc + instructions, each on LONG_SHIFT bytes -- this is optimized for the + Nehalem, + Westmere, Sandy Bridge, and Ivy Bridge architectures, which have a + throughput of one crc per cycle, but a latency of three cycles */ + while (len >= 3 * LONG_SHIFT) { + crc1 = 0; + crc2 = 0; + end = next + LONG_SHIFT; + do { + crc0 = _mm_crc32_u32(crc0, *(uint32_t *)(next)); + crc1 = _mm_crc32_u32(crc1, *(const uint32_t *)(next + LONG_SHIFT)); + crc2 = _mm_crc32_u32(crc2, *(const uint32_t *)(next + 2 * LONG_SHIFT)); + next += 4; + } while (next < end); + crc0 = shift_crc(long_shifts, (uint32_t)(crc0)) ^ crc1; + crc0 = shift_crc(long_shifts, (uint32_t)(crc0)) ^ crc2; + next += 2 * LONG_SHIFT; + len -= 3 * LONG_SHIFT; + } + + /* do the same thing, but now on SHORT_SHIFT*3 blocks for the remaining data + less + than a LONG_SHIFT*3 block */ + while (len >= 3 * SHORT_SHIFT) { + crc1 = 0; + crc2 = 0; + end = next + SHORT_SHIFT; + do { + crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)(next)); + crc1 = _mm_crc32_u32(crc1, *(const uint32_t *)(next + SHORT_SHIFT)); + crc2 = _mm_crc32_u32(crc2, *(const uint32_t *)(next + 2 * SHORT_SHIFT)); + next += 4; + } while (next < end); + crc0 = shift_crc(short_shifts, (uint32_t)(crc0)) ^ crc1; + crc0 = shift_crc(short_shifts, (uint32_t)(crc0)) ^ crc2; + next += 2 * 
SHORT_SHIFT; + len -= 3 * SHORT_SHIFT; + } + + /* compute the crc on the remaining eight-byte units less than a SHORT_SHIFT*3 + block */ + end = next + (len - (len & 7)); + while (next < end) { + crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)(next)); + next += 4; + } +#endif + len &= 7; + + /* compute the crc for up to seven trailing bytes */ + while (len) { + crc0 = _mm_crc32_u8((uint32_t)(crc0), *next); + ++next; + --len; + } + + /* return a post-processed crc */ + return (uint32_t)(crc0) ^ 0xffffffff; +} + +/* static void * resolve_crc32c(void) { */ +/* __builtin_cpu_init(); */ +/* if (__builtin_cpu_supports("sse4.2")) return crc32c_hw; */ +/* */ +/* return crc32c_sf; */ +/* } */ +/* */ +/* uint32_t crc32c(uint32_t crci, crc_stream bytes, size_t len) __attribute__ + * ((ifunc ("resolve_crc32c"))); */ + +#define SSE42(have) \ + do { \ + uint32_t eax, ecx; \ + eax = 1; \ + __asm__("cpuid" : "=c"(ecx) : "a"(eax) : "%ebx", "%edx"); \ + (have) = (ecx >> 20) & 1; \ + } while (0) + +/* Compute a CRC-32C. If the crc32 instruction is available, use the hardware + version. Otherwise, use the software version. */ +uint32_t (*crc32c)(uint32_t crci, crc_stream bytes, size_t len) = NULL; + +void taosResolveCRC() { + int sse42; + SSE42(sse42); + crc32c = sse42 ? crc32c_hw : crc32c_sf; + /* return sse42 ? crc32c_hw(crci, bytes, len) : crc32c_sf(crci, bytes, len); + */ +} + +#ifdef TEST_CRC32C_MAIN +#include +#include + +int main(int argc, char *argv[]) { + char str[1024] = "\0"; + char *ptr = str; + int count = 0; + while ((count = read(0, ptr, 10)) > 0) { + ptr += count; + } + printf("str: %s\n", str); + uint32_t len = strlen(str); + uint32_t crc1 = crc32c_hw(0, str, len); + uint32_t crc2 = crc32c_sf(0, str, len); + uint32_t crc3 = crc32c(0, str, len); + printf("0x%08x 0x%08x\n", crc1, crc2); + + if (crc1 != crc2) { + printf("Mismatch NNNNNNNNNNNNNN\n"); + } else { + printf("Match OOOOOOOOOOOOOO\n"); + } +} +#endif diff --git a/src/util/src/textbuffer.c b/src/util/src/textbuffer.c new file mode 100644 index 000000000000..5b2885a58c7c --- /dev/null +++ b/src/util/src/textbuffer.c @@ -0,0 +1,1988 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "taos.h" +#include "taosmsg.h" +#include "textbuffer.h" +#include "tlog.h" +#include "tsql.h" +#include "tsqlfunction.h" +#include "ttime.h" +#include "ttypes.h" + +#pragma GCC diagnostic ignored "-Wformat" + +#define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \ + (data + (schema)->colOffset[colId] * (allrow) + (rowId) * (schema)->pFields[colId].bytes) + +void getExtTmpfilePath(const char *fileNamePattern, int64_t serialNumber, int32_t seg, int32_t slot, char *dstPath) { + char tmpPath[512] = {0}; + + char *tmpDir = NULL; + + tmpDir = "/tmp/"; + + strcat(tmpPath, tmpDir); + strcat(tmpPath, fileNamePattern); + + int32_t ret = sprintf(dstPath, tmpPath, taosGetTimestampUs(), serialNumber, seg, slot); + dstPath[ret] = 0; // ensure null-terminated string +} + +/* + * tColModel is deeply copy + */ +void tExtMemBufferCreate(tExtMemBuffer **pMemBuffer, int32_t nBufferSize, int32_t elemSize, const char *tmpDataFilePath, + tColModel *pModel) { + (*pMemBuffer) = (tExtMemBuffer *)calloc(1, sizeof(tExtMemBuffer)); + + (*pMemBuffer)->nPageSize = DEFAULT_PAGE_SIZE; + (*pMemBuffer)->nMaxSizeInPages = ALIGN8(nBufferSize) / (*pMemBuffer)->nPageSize; + (*pMemBuffer)->nElemSize = elemSize; + + (*pMemBuffer)->numOfElemsPerPage = ((*pMemBuffer)->nPageSize - sizeof(tFilePage)) / (*pMemBuffer)->nElemSize; + + strcpy((*pMemBuffer)->dataFilePath, tmpDataFilePath); + + tFileMeta *pFMeta = &(*pMemBuffer)->fileMeta; + + pFMeta->numOfElemsInFile = 0; + pFMeta->nFileSize = 0; + pFMeta->nPageSize = DEFAULT_PAGE_SIZE; + + pFMeta->flushoutData.nAllocSize = 4; + pFMeta->flushoutData.nLength = 0; + pFMeta->flushoutData.pFlushoutInfo = (tFlushoutInfo *)calloc(4, sizeof(tFlushoutInfo)); + + (*pMemBuffer)->pColModel = tColModelCreate(pModel->pFields, pModel->numOfCols, (*pMemBuffer)->numOfElemsPerPage); +} + +void tExtMemBufferDestroy(tExtMemBuffer **pMemBuffer) { + if ((*pMemBuffer) == NULL) { + return; + } + + // release flush out info link + tFileMeta *pFileMeta = &(*pMemBuffer)->fileMeta; + if (pFileMeta->flushoutData.nAllocSize != 0 && pFileMeta->flushoutData.pFlushoutInfo != NULL) { + tfree(pFileMeta->flushoutData.pFlushoutInfo); + } + + // release all in-memory buffer pages + tFilePagesItem *pFilePages = (*pMemBuffer)->pHead; + while (pFilePages != NULL) { + tFilePagesItem *pTmp = pFilePages; + pFilePages = pFilePages->pNext; + tfree(pTmp); + } + + // close temp file + if ((*pMemBuffer)->dataFile != 0) { + int32_t ret = fclose((*pMemBuffer)->dataFile); + if (ret != 0) { + pError("failed to close file:%s, reason:%s", (*pMemBuffer)->dataFilePath, strerror(errno)); + } + unlink((*pMemBuffer)->dataFilePath); + } + + tColModelDestroy((*pMemBuffer)->pColModel); + + tfree(*pMemBuffer); +} + +/* + * alloc more memory for flush out info entries. + */ +static bool allocFlushoutInfoEntries(tFileMeta *pFileMeta) { + pFileMeta->flushoutData.nAllocSize = pFileMeta->flushoutData.nAllocSize << 1; + + tFlushoutInfo *tmp = (tFlushoutInfo *)realloc(pFileMeta->flushoutData.pFlushoutInfo, + sizeof(tFlushoutInfo) * pFileMeta->flushoutData.nAllocSize); + if (tmp == NULL) { + pError("out of memory!\n"); + return false; + } + + pFileMeta->flushoutData.pFlushoutInfo = tmp; + return true; +} + +bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) { + if (pMemBuffer->numOfPagesInMem > 0 && pMemBuffer->numOfPagesInMem == pMemBuffer->nMaxSizeInPages) { + /* + * the in-mem buffer is full. 
+ * To flush data to disk to accommodate more data + */ + if (!tExtMemBufferFlush(pMemBuffer)) { + return false; + } + } + + /* + * We do not recycle the file page structure. And in flush data operations, all + * filepage that are full of data are destroyed after data being flushed to disk. + * + * The memory buffer pages may be recycle in order to avoid unnecessary memory + * allocation later. + */ + tFilePagesItem *item = (tFilePagesItem *)calloc(1, pMemBuffer->nPageSize + sizeof(tFilePagesItem)); + if (item == NULL) { + return false; + } + + item->pNext = NULL; + item->item.numOfElems = 0; + + if (pMemBuffer->pTail != NULL) { + pMemBuffer->pTail->pNext = item; + pMemBuffer->pTail = item; + } else { + pMemBuffer->pTail = item; + pMemBuffer->pHead = item; + } + + pMemBuffer->numOfPagesInMem += 1; + + return true; +} + +/* + * put elements into buffer + */ +int16_t tExtMemBufferPut(tExtMemBuffer *pMemBuffer, void *data, int32_t numOfRows) { + if (numOfRows == 0) { + return pMemBuffer->numOfPagesInMem; + } + + tFilePagesItem *pLast = pMemBuffer->pTail; + if (pLast == NULL) { + if (!tExtMemBufferAlloc(pMemBuffer)) { + return -1; + } + + pLast = pMemBuffer->pTail; + } + + if (pLast->item.numOfElems + numOfRows <= pMemBuffer->numOfElemsPerPage) { + // enough space for records + tColModelAppend(pMemBuffer->pColModel, &pLast->item, data, 0, numOfRows, numOfRows); + pMemBuffer->numOfElemsInBuffer += numOfRows; + pMemBuffer->numOfAllElems += numOfRows; + } else { + int32_t numOfRemainEntries = pMemBuffer->numOfElemsPerPage - pLast->item.numOfElems; + tColModelAppend(pMemBuffer->pColModel, &pLast->item, data, 0, numOfRemainEntries, numOfRows); + + pMemBuffer->numOfElemsInBuffer += numOfRemainEntries; + pMemBuffer->numOfAllElems += numOfRemainEntries; + + int32_t hasWritten = numOfRemainEntries; + int32_t remain = numOfRows - numOfRemainEntries; + + while (remain > 0) { + if (!tExtMemBufferAlloc(pMemBuffer)) { + // failed to allocate memory buffer + return -1; + } + + int32_t numOfWriteElems = 0; + if (remain > pMemBuffer->numOfElemsPerPage) { + numOfWriteElems = pMemBuffer->numOfElemsPerPage; + } else { + numOfWriteElems = remain; + } + + pMemBuffer->numOfAllElems += numOfWriteElems; + + pLast = pMemBuffer->pTail; + tColModelAppend(pMemBuffer->pColModel, &pLast->item, data, hasWritten, numOfWriteElems, numOfRows); + + remain -= numOfWriteElems; + pMemBuffer->numOfElemsInBuffer += numOfWriteElems; + hasWritten += numOfWriteElems; + } + } + + return pMemBuffer->numOfPagesInMem; +} + +static bool tExtMemBufferUpdateFlushoutInfo(tExtMemBuffer *pMemBuffer) { + tFileMeta *pFileMeta = &pMemBuffer->fileMeta; + + if (pMemBuffer->flushModel == MULTIPLE_APPEND_MODEL) { + if (pFileMeta->flushoutData.nLength == pFileMeta->flushoutData.nAllocSize && !allocFlushoutInfoEntries(pFileMeta)) { + return false; + } + + tFlushoutInfo *pFlushoutInfo = &pFileMeta->flushoutData.pFlushoutInfo[pFileMeta->flushoutData.nLength]; + if (pFileMeta->flushoutData.nLength == 0) { + pFlushoutInfo->startPageId = 0; + } else { + pFlushoutInfo->startPageId = + pFileMeta->flushoutData.pFlushoutInfo[pFileMeta->flushoutData.nLength - 1].startPageId + + pFileMeta->flushoutData.pFlushoutInfo[pFileMeta->flushoutData.nLength - 1].numOfPages; + } + + // only the page still in buffer is flushed out to disk + pFlushoutInfo->numOfPages = pMemBuffer->numOfPagesInMem; + pFileMeta->flushoutData.nLength += 1; + } else { + // always update the first flushout array in single_flush_model + pFileMeta->flushoutData.nLength = 1; + tFlushoutInfo 
*pFlushoutInfo = &pFileMeta->flushoutData.pFlushoutInfo[0]; + pFlushoutInfo->numOfPages += pMemBuffer->numOfPagesInMem; + } + + return true; +} + +static void tExtMemBufferClearFlushoutInfo(tExtMemBuffer *pMemBuffer) { + tFileMeta *pFileMeta = &pMemBuffer->fileMeta; + + pFileMeta->flushoutData.nLength = 0; + memset(pFileMeta->flushoutData.pFlushoutInfo, 0, sizeof(tFlushoutInfo) * pFileMeta->flushoutData.nAllocSize); +} + +bool tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { + if (pMemBuffer->numOfAllElems == 0) { + return true; + } + + if (pMemBuffer->dataFile == NULL) { + if ((pMemBuffer->dataFile = fopen(pMemBuffer->dataFilePath, "wb+")) == NULL) { + return false; + } + } + + if (pMemBuffer->numOfElemsInBuffer == 0) { + /* all data has been flushed to disk, ignore flush operation */ + return true; + } + + bool ret = true; + tFilePagesItem *first = pMemBuffer->pHead; + + while (first != NULL) { + size_t retVal = fwrite((char *)&(first->item), pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + if (retVal <= 0) { // failed to write to buffer, may be not enough space + ret = false; + } + + pMemBuffer->fileMeta.numOfElemsInFile += first->item.numOfElems; + pMemBuffer->fileMeta.nFileSize += 1; + + tFilePagesItem *ptmp = first; + first = first->pNext; + + tfree(ptmp); // release all data in memory buffer + } + + fflush(pMemBuffer->dataFile); // flush to disk + + tExtMemBufferUpdateFlushoutInfo(pMemBuffer); + + pMemBuffer->numOfElemsInBuffer = 0; + pMemBuffer->numOfPagesInMem = 0; + pMemBuffer->pHead = NULL; + pMemBuffer->pTail = NULL; + + return ret; +} + +void tExtMemBufferClear(tExtMemBuffer *pMemBuffer) { + if (pMemBuffer == NULL || pMemBuffer->numOfAllElems == 0) return; + + /* + * release all data in memory buffer + */ + tFilePagesItem *first = pMemBuffer->pHead; + while (first != NULL) { + tFilePagesItem *ptmp = first; + first = first->pNext; + tfree(ptmp); + } + + pMemBuffer->fileMeta.numOfElemsInFile = 0; + pMemBuffer->fileMeta.nFileSize = 0; + + pMemBuffer->numOfElemsInBuffer = 0; + pMemBuffer->numOfPagesInMem = 0; + pMemBuffer->pHead = NULL; + pMemBuffer->pTail = NULL; + + tExtMemBufferClearFlushoutInfo(pMemBuffer); + + if (pMemBuffer->dataFile != NULL) { + // reset the write pointer to the header + fseek(pMemBuffer->dataFile, 0, SEEK_SET); + } +} + +bool tExtMemBufferLoadData(tExtMemBuffer *pMemBuffer, tFilePage *pFilePage, int32_t flushoutId, int32_t pageIdx) { + if (flushoutId < 0 || flushoutId > pMemBuffer->fileMeta.flushoutData.nLength) { + return false; + } + + tFlushoutInfo *pInfo = &(pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[flushoutId]); + if (pageIdx > (int32_t)pInfo->numOfPages) { + return false; + } + + size_t ret = fseek(pMemBuffer->dataFile, (pInfo->startPageId + pageIdx) * pMemBuffer->nPageSize, SEEK_SET); + ret = fread(pFilePage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + + return (ret > 0); +} + +bool tExtMemBufferIsAllDataInMem(tExtMemBuffer *pMemBuffer) { return (pMemBuffer->fileMeta.nFileSize == 0); } + +////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// TODO safty check in result +void tBucketBigIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { + int64_t v = *(int64_t *)value; + + if (pBucket->nRange.i64MaxVal == INT64_MIN) { + if (v >= 0) { + *segIdx = ((v >> (64 - 9)) >> 6) + 8; + *slotIdx = (v >> (64 - 9)) & 0x3F; + } else { // v<0 + *segIdx = ((-v) >> (64 - 9)) >> 6; + *slotIdx = ((-v) >> (64 - 9)) & 0x3F; + *segIdx = 7 - (*segIdx); + } + } else { + // 
todo hash for bigint and float and double + int64_t span = pBucket->nRange.i64MaxVal - pBucket->nRange.i64MinVal; + if (span < pBucket->nTotalSlots) { + int32_t delta = (int32_t)(v - pBucket->nRange.i64MinVal); + *segIdx = delta / pBucket->nSlotsOfSeg; + *slotIdx = delta % pBucket->nSlotsOfSeg; + } else { + double x = (double)span / pBucket->nTotalSlots; + double posx = (v - pBucket->nRange.i64MinVal) / x; + if (v == pBucket->nRange.i64MaxVal) { + posx -= 1; + } + + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } + } +} + +// todo refactor to more generic +void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { + int32_t v = *(int32_t *)value; + + if (pBucket->nRange.iMaxVal == INT32_MIN) { + /* + * taking negative integer into consideration, + * there is only half of pBucket->segs available for non-negative integer + */ + // int32_t numOfSlots = pBucket->nTotalSlots>>1; + // int32_t bits = bitsOfNumber(numOfSlots)-1; + + if (v >= 0) { + *segIdx = ((v >> (32 - 9)) >> 6) + 8; + *slotIdx = (v >> (32 - 9)) & 0x3F; + } else { // v<0 + *segIdx = ((-v) >> (32 - 9)) >> 6; + *slotIdx = ((-v) >> (32 - 9)) & 0x3F; + *segIdx = 7 - (*segIdx); + } + } else { + // divide a range of [iMinVal, iMaxVal] into 1024 buckets + int32_t span = pBucket->nRange.iMaxVal - pBucket->nRange.iMinVal; + if (span < pBucket->nTotalSlots) { + int32_t delta = v - pBucket->nRange.iMinVal; + *segIdx = delta / pBucket->nSlotsOfSeg; + *slotIdx = delta % pBucket->nSlotsOfSeg; + } else { + double x = (double)span / pBucket->nTotalSlots; + double posx = (v - pBucket->nRange.iMinVal) / x; + if (v == pBucket->nRange.iMaxVal) { + posx -= 1; + } + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } + } +} + +void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx) { + double v = *(double *)value; + + if (pBucket->nRange.dMinVal == DBL_MAX) { + /* + * taking negative integer into consideration, + * there is only half of pBucket->segs available for non-negative integer + */ + double x = DBL_MAX / (pBucket->nTotalSlots >> 1); + double posx = (v + DBL_MAX) / x; + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } else { + // divide a range of [dMinVal, dMaxVal] into 1024 buckets + double span = pBucket->nRange.dMaxVal - pBucket->nRange.dMinVal; + if (span < pBucket->nTotalSlots) { + int32_t delta = (int32_t)(v - pBucket->nRange.dMinVal); + *segIdx = delta / pBucket->nSlotsOfSeg; + *slotIdx = delta % pBucket->nSlotsOfSeg; + } else { + double x = span / pBucket->nTotalSlots; + double posx = (v - pBucket->nRange.dMinVal) / x; + if (v == pBucket->nRange.dMaxVal) { + posx -= 1; + } + *segIdx = ((int32_t)posx) / pBucket->nSlotsOfSeg; + *slotIdx = ((int32_t)posx) % pBucket->nSlotsOfSeg; + } + + if (*segIdx < 0 || *segIdx > 16 || *slotIdx < 0 || *slotIdx > 64) { + pError("error in hash process. segment is: %d, slot id is: %d\n", *segIdx, *slotIdx); + } + } +} + +void tMemBucketCreate(tMemBucket **pBucket, int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, + int16_t dataType, tOrderDescriptor *pDesc) { + *pBucket = (tMemBucket *)malloc(sizeof(tMemBucket)); + + (*pBucket)->nTotalSlots = totalSlots; + (*pBucket)->nSlotsOfSeg = 1 << 6; // 64 Segments, 16 slots each seg. 
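+  // nSlotsOfSeg is the number of slots per segment (64), so numOfSegs computed below
+  // is totalSlots / 64 -- e.g. 16 segments of 64 slots each when totalSlots is 1024.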
+ (*pBucket)->dataType = dataType; + (*pBucket)->nElemSize = nElemSize; + (*pBucket)->nPageSize = DEFAULT_PAGE_SIZE; + + (*pBucket)->numOfElems = 0; + (*pBucket)->numOfSegs = (*pBucket)->nTotalSlots / (*pBucket)->nSlotsOfSeg; + + (*pBucket)->nTotalBufferSize = nBufferSize; + + (*pBucket)->maxElemsCapacity = (*pBucket)->nTotalBufferSize / (*pBucket)->nElemSize; + + (*pBucket)->numOfTotalPages = (*pBucket)->nTotalBufferSize / (*pBucket)->nPageSize; + (*pBucket)->numOfAvailPages = (*pBucket)->numOfTotalPages; + + (*pBucket)->pOrderDesc = pDesc; + + switch ((*pBucket)->dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + (*pBucket)->nRange.iMinVal = INT32_MAX; + (*pBucket)->nRange.iMaxVal = INT32_MIN; + (*pBucket)->HashFunc = tBucketIntHash; + break; + }; + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_FLOAT: { + (*pBucket)->nRange.dMinVal = DBL_MAX; + (*pBucket)->nRange.dMaxVal = -DBL_MAX; + (*pBucket)->HashFunc = tBucketDoubleHash; + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + (*pBucket)->nRange.i64MinVal = INT64_MAX; + (*pBucket)->nRange.i64MaxVal = INT64_MIN; + (*pBucket)->HashFunc = tBucketBigIntHash; + break; + }; + default: { + pError("MemBucket:%p,not support data type %d,failed", *pBucket, (*pBucket)->dataType); + tfree(*pBucket); + return; + } + } + + if (pDesc->pSchema->numOfCols != 1 || pDesc->pSchema->colOffset[0] != 0) { + pError("MemBucket:%p,only consecutive data is allowed,invalid numOfCols:%d or offset:%d", + *pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]); + tfree(*pBucket); + return; + } + + if (pDesc->pSchema->pFields[0].type != dataType) { + pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", *pBucket, + pDesc->pSchema->pFields[0].type, dataType); + tfree(*pBucket); + return; + } + + if ((*pBucket)->numOfTotalPages < (*pBucket)->nTotalSlots) { + pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", *pBucket, (*pBucket)->numOfTotalPages); + } + + (*pBucket)->pSegs = (tMemBucketSegment *)malloc((*pBucket)->numOfSegs * sizeof(tMemBucketSegment)); + + for (int32_t i = 0; i < (*pBucket)->numOfSegs; ++i) { + (*pBucket)->pSegs[i].numOfSlots = (*pBucket)->nSlotsOfSeg; + (*pBucket)->pSegs[i].pBuffer = NULL; + (*pBucket)->pSegs[i].pBoundingEntries = NULL; + } + + pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", *pBucket, (*pBucket)->numOfTotalPages * DEFAULT_PAGE_SIZE, + (*pBucket)->nElemSize); +} + +void tMemBucketDestroy(tMemBucket **pBucket) { + if (*pBucket == NULL) { + return; + } + + if ((*pBucket)->pSegs) { + for (int32_t i = 0; i < (*pBucket)->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &((*pBucket)->pSegs[i]); + tfree(pSeg->pBoundingEntries); + + if (pSeg->pBuffer == NULL || pSeg->numOfSlots == 0) { + continue; + } + + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + if (pSeg->pBuffer[j] != NULL) { + tExtMemBufferDestroy(&pSeg->pBuffer[j]); + } + } + tfree(pSeg->pBuffer); + } + } + + tfree((*pBucket)->pSegs); + tfree(*pBucket); +} + +/* + * find the slots which accounts for largest proportion of total in-memory buffer + */ +static void tBucketGetMaxMemSlot(tMemBucket *pBucket, int16_t *segIdx, int16_t *slotIdx) { + *segIdx = -1; + *slotIdx = -1; + + int32_t val = 0; + for (int32_t k = 0; k < pBucket->numOfSegs; ++k) { + tMemBucketSegment *pSeg = &pBucket->pSegs[k]; + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + if (pSeg->pBuffer == NULL || pSeg->pBuffer[i] == NULL) { + continue; + } + + if (val < pSeg->pBuffer[i]->numOfPagesInMem) { 
+ val = pSeg->pBuffer[i]->numOfPagesInMem; + *segIdx = k; + *slotIdx = i; + } + } + } +} + +static void resetBoundingBox(tMemBucketSegment *pSeg, int32_t type) { + switch (type) { + case TSDB_DATA_TYPE_BIGINT: { + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + pSeg->pBoundingEntries[i].i64MaxVal = INT64_MIN; + pSeg->pBoundingEntries[i].i64MinVal = INT64_MAX; + } + break; + }; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + pSeg->pBoundingEntries[i].iMaxVal = INT32_MIN; + pSeg->pBoundingEntries[i].iMinVal = INT32_MAX; + } + break; + }; + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_FLOAT: { + for (int32_t i = 0; i < pSeg->numOfSlots; ++i) { + pSeg->pBoundingEntries[i].dMaxVal = -DBL_MAX; + pSeg->pBoundingEntries[i].dMinVal = DBL_MAX; + } + break; + } + } +} + +void tMemBucketUpdateBoundingBox(MinMaxEntry *r, char *data, int32_t dataType) { + switch (dataType) { + case TSDB_DATA_TYPE_INT: { + int32_t val = *(int32_t *)data; + if (r->iMinVal > val) { + r->iMinVal = val; + } + + if (r->iMaxVal < val) { + r->iMaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + int64_t val = *(int64_t *)data; + if (r->i64MinVal > val) { + r->i64MinVal = val; + } + + if (r->i64MaxVal < val) { + r->i64MaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + int32_t val = *(int16_t *)data; + if (r->iMinVal > val) { + r->iMinVal = val; + } + + if (r->iMaxVal < val) { + r->iMaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_TINYINT: { + int32_t val = *(int8_t *)data; + if (r->iMinVal > val) { + r->iMinVal = val; + } + + if (r->iMaxVal < val) { + r->iMaxVal = val; + } + + break; + }; + case TSDB_DATA_TYPE_DOUBLE: { + double val = *(double *)data; + if (r->dMinVal > val) { + r->dMinVal = val; + } + + if (r->dMaxVal < val) { + r->dMaxVal = val; + } + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + double val = *(float *)data; + + if (r->dMinVal > val) { + r->dMinVal = val; + } + + if (r->dMaxVal < val) { + r->dMaxVal = val; + } + break; + }; + default: { assert(false); } + } +} + +/* + * in memory bucket, we only accept the simple data consecutive put in a row/column + * no column-model in this case. 
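+ *
+ * For each value this routine hashes it to a (segment, slot) pair with HashFunc,
+ * lazily allocates the per-slot tExtMemBuffer (backed by its own temp file) on
+ * first use, updates the slot's bounding box, and, when no buffer pages remain,
+ * flushes the in-memory slot holding the most pages to disk before appending
+ * the value.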
+ */ +void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows) { + pBucket->numOfElems += numOfRows; + int16_t segIdx = 0, slotIdx = 0; + + for (int32_t i = 0; i < numOfRows; ++i) { + char *d = (char *)data + i * tDataTypeDesc[pBucket->dataType].nSize; + + switch (pBucket->dataType) { + case TSDB_DATA_TYPE_SMALLINT: { + int32_t val = *(int16_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_TINYINT: { + int32_t val = *(int8_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_INT: { + int32_t val = *(int32_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t val = *(int64_t *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + double val = *(double *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + double val = *(float *)d; + (pBucket->HashFunc)(pBucket, &val, &segIdx, &slotIdx); + break; + } + } + + tMemBucketSegment *pSeg = &pBucket->pSegs[segIdx]; + if (pSeg->pBoundingEntries == NULL) { + pSeg->pBoundingEntries = (MinMaxEntry *)malloc(sizeof(MinMaxEntry) * pBucket->nSlotsOfSeg); + resetBoundingBox(pSeg, pBucket->dataType); + } + + if (pSeg->pBuffer == NULL) { + pSeg->pBuffer = (tExtMemBuffer **)calloc(pBucket->nSlotsOfSeg, sizeof(void *)); + } + + if (pSeg->pBuffer[slotIdx] == NULL) { + int64_t pid = taosGetPthreadId(); + char name[512] = {0}; + getExtTmpfilePath("/tb_ex_bk_%lld_%lld_%d_%d", pid, segIdx, slotIdx, name); + tExtMemBufferCreate(&pSeg->pBuffer[slotIdx], pBucket->numOfTotalPages * pBucket->nPageSize, pBucket->nElemSize, + name, pBucket->pOrderDesc->pSchema); + pSeg->pBuffer[slotIdx]->flushModel = SINGLE_APPEND_MODEL; + pBucket->pOrderDesc->pSchema->maxCapacity = pSeg->pBuffer[slotIdx]->numOfElemsPerPage; + } + + tMemBucketUpdateBoundingBox(&pSeg->pBoundingEntries[slotIdx], d, pBucket->dataType); + + // ensure available memory pages to allocate + int16_t cseg = 0, cslot = 0; + if (pBucket->numOfAvailPages == 0) { + pTrace("MemBucket:%p,max avail size:%d, no avail memory pages,", pBucket, pBucket->numOfTotalPages); + + tBucketGetMaxMemSlot(pBucket, &cseg, &cslot); + if (cseg == -1 || cslot == -1) { + pError("MemBucket:%p,failed to find appropriated avail buffer", pBucket); + return; + } + + if (cseg != segIdx || cslot != slotIdx) { + pBucket->numOfAvailPages += pBucket->pSegs[cseg].pBuffer[cslot]->numOfPagesInMem; + + int32_t avail = pBucket->pSegs[cseg].pBuffer[cslot]->numOfPagesInMem; + UNUSED(avail); + tExtMemBufferFlush(pBucket->pSegs[cseg].pBuffer[cslot]); + + pTrace("MemBucket:%p,seg:%d,slot:%d flushed to disk,new avail pages:%d", pBucket, cseg, cslot, + pBucket->numOfAvailPages); + } else { + pTrace("MemBucket:%p,failed to choose slot to flush to disk seg:%d,slot:%d", + pBucket, cseg, cslot); + } + } + int16_t consumedPgs = pSeg->pBuffer[slotIdx]->numOfPagesInMem; + + int16_t newPgs = tExtMemBufferPut(pSeg->pBuffer[slotIdx], d, 1); + /* + * trigger 1. page re-allocation, to reduce the available pages + * 2. 
page flushout, to increase the available pages + */ + pBucket->numOfAvailPages += (consumedPgs - newPgs); + } +} + +void releaseBucket(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { + if (segIdx < 0 || segIdx > pMemBucket->numOfSegs || slotIdx < 0) { + return; + } + + tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; + if (slotIdx < 0 || slotIdx >= pSeg->numOfSlots || pSeg->pBuffer[slotIdx] == NULL) { + return; + } + + tExtMemBufferDestroy(&pSeg->pBuffer[slotIdx]); +} + +static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t colIdx, int32_t tsOrder) { + if (f1 == f2) { + return 0; + } + + if (colIdx == 0 && tsOrder == TSQL_SO_DESC) { // primary column desc order + return (f1 < f2) ? 1 : -1; + } else { // asc + return (f1 < f2) ? -1 : 1; + } +} + +static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) { + switch (type) { + case TSDB_DATA_TYPE_INT: { + int32_t first = *(int32_t *)f1; + int32_t second = *(int32_t *)f2; + if (first == second) { + return 0; + } + return (first < second) ? -1 : 1; + }; + case TSDB_DATA_TYPE_DOUBLE: { + double first = *(double *)f1; + double second = *(double *)f2; + if (first == second) { + return 0; + } + return (first < second) ? -1 : 1; + }; + case TSDB_DATA_TYPE_FLOAT: { + float first = *(float *)f1; + float second = *(float *)f2; + if (first == second) { + return 0; + } + return (first < second) ? -1 : 1; + }; + case TSDB_DATA_TYPE_BIGINT: { + int64_t first = *(int64_t *)f1; + int64_t second = *(int64_t *)f2; + if (first == second) { + return 0; + } + return (first < second) ? -1 : 1; + }; + case TSDB_DATA_TYPE_SMALLINT: { + int16_t first = *(int16_t *)f1; + int16_t second = *(int16_t *)f2; + if (first == second) { + return 0; + } + return (first < second) ? -1 : 1; + }; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + int8_t first = *(int8_t *)f1; + int8_t second = *(int8_t *)f2; + if (first == second) { + return 0; + } + return (first < second) ? -1 : 1; + }; + case TSDB_DATA_TYPE_BINARY: { + int32_t ret = strncmp(f1, f2, bytes); + if (ret == 0) { + return 0; + } + return (ret < 0) ? -1 : 1; + }; + case TSDB_DATA_TYPE_NCHAR: { + int32_t b = bytes / TSDB_NCHAR_SIZE; + int32_t ret = wcsncmp((wchar_t *)f1, (wchar_t *)f2, b); + if (ret == 0) { + return 0; + } + return (ret < 0) ? 
-1 : 1; + }; + } + + return 0; +} + +int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, char *data1, int32_t numOfRows2, + int32_t s2, char *data2) { + assert(numOfRows1 == numOfRows2); + + int32_t cmpCnt = pDescriptor->orderIdx.numOfOrderedCols; + for (int32_t i = 0; i < cmpCnt; ++i) { + int32_t colIdx = pDescriptor->orderIdx.pData[i]; + + char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, numOfRows1, s1, colIdx); + char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pSchema, numOfRows2, s2, colIdx); + + if (pDescriptor->pSchema->pFields[colIdx].type == TSDB_DATA_TYPE_TIMESTAMP) { + int32_t ret = primaryKeyComparator(*(int64_t *)f1, *(int64_t *)f2, colIdx, pDescriptor->tsOrder); + if (ret == 0) { + continue; + } else { + return ret; + } + } else { + SSchema *pSchema = &pDescriptor->pSchema->pFields[colIdx]; + int32_t ret = columnValueAscendingComparator(f1, f2, pSchema->type, pSchema->bytes); + if (ret == 0) { + continue; + } else { + return ret; + } + } + } + + return 0; +} + +int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, char *data1, int32_t numOfRows2, + int32_t s2, char *data2) { + assert(numOfRows1 == numOfRows2); + + int32_t cmpCnt = pDescriptor->orderIdx.numOfOrderedCols; + for (int32_t i = 0; i < cmpCnt; ++i) { + int32_t colIdx = pDescriptor->orderIdx.pData[i]; + + char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, numOfRows1, s1, colIdx); + char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pSchema, numOfRows2, s2, colIdx); + + if (pDescriptor->pSchema->pFields[colIdx].type == TSDB_DATA_TYPE_TIMESTAMP) { + int32_t ret = primaryKeyComparator(*(int64_t *)f1, *(int64_t *)f2, colIdx, pDescriptor->tsOrder); + if (ret == 0) { + continue; + } else { + return ret; + } + } else { + SSchema *pSchema = &pDescriptor->pSchema->pFields[colIdx]; + int32_t ret = columnValueAscendingComparator(f1, f2, pSchema->type, pSchema->bytes); + if (ret == 0) { + continue; + } else { + return -ret; // descending order + } + } + } + + return 0; +} +FORCE_INLINE int32_t compare_sa(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t idx1, int32_t idx2, + char *data) { + return compare_a(pDescriptor, numOfRows, idx1, data, numOfRows, idx2, data); +} + +FORCE_INLINE int32_t compare_sd(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t idx1, int32_t idx2, + char *data) { + return compare_d(pDescriptor, numOfRows, idx1, data, numOfRows, idx2, data); +} + +static void swap(tOrderDescriptor *pDescriptor, int32_t count, int32_t s1, char *data1, int32_t s2) { + for (int32_t i = 0; i < pDescriptor->pSchema->numOfCols; ++i) { + void *first = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, count, s1, i); + void *second = COLMODEL_GET_VAL(data1, pDescriptor->pSchema, count, s2, i); + + tsDataSwap(first, second, pDescriptor->pSchema->pFields[i].type, pDescriptor->pSchema->pFields[i].bytes); + } +} + +static void tColDataInsertSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, + __col_compar_fn_t compareFn) { + for (int32_t i = start + 1; i <= end; ++i) { + for (int32_t j = i; j > start; --j) { + if (compareFn(pDescriptor, numOfRows, j, j - 1, data) == -1) { + swap(pDescriptor, numOfRows, j - 1, data, j); + } else { + break; + } + } + } +} + +static void UNUSED_FUNC tSortDataPrint(int32_t type, char *prefix, char *startx, char *midx, char *endx) { + switch (type) { + case TSDB_DATA_TYPE_INT: + printf("%s:(%d, %d, %d)\n", prefix, *(int32_t *)startx, *(int32_t *)midx, *(int32_t *)endx); + break; + case 
TSDB_DATA_TYPE_TINYINT: + printf("%s:(%d, %d, %d)\n", prefix, *(int8_t *)startx, *(int8_t *)midx, *(int8_t *)endx); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf("%s:(%d, %d, %d)\n", prefix, *(int16_t *)startx, *(int16_t *)midx, *(int16_t *)endx); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + printf("%s:(%lld, %lld, %lld)\n", prefix, *(int64_t *)startx, *(int64_t *)midx, *(int64_t *)endx); + break; + case TSDB_DATA_TYPE_FLOAT: + printf("%s:(%f, %f, %f)\n", prefix, *(float *)startx, *(float *)midx, *(float *)endx); + break; + case TSDB_DATA_TYPE_DOUBLE: + printf("%s:(%lf, %lf, %lf)\n", prefix, *(double *)startx, *(double *)midx, *(double *)endx); + break; + case TSDB_DATA_TYPE_BINARY: + printf("%s:(%s, %s, %s)\n", prefix, startx, midx, endx); + break; + } +} + +static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, + __col_compar_fn_t compareFn) { + int32_t midIdx = ((end - start) >> 1) + start; + +#if defined(_DEBUG_VIEW) + int32_t f = pDescriptor->orderIdx.pData[0]; + + char *midx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, midIdx, f); + char *startx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, start, f); + char *endx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, end, f); + + int32_t colIdx = pDescriptor->orderIdx.pData[0]; + tSortDataPrint(pDescriptor->pSchema->pFields[colIdx].type, "before", startx, midx, endx); +#endif + + if (compareFn(pDescriptor, numOfRows, midIdx, start, data) == 1) { + swap(pDescriptor, numOfRows, start, data, midIdx); + } + + if (compareFn(pDescriptor, numOfRows, midIdx, end, data) == 1) { + swap(pDescriptor, numOfRows, midIdx, data, start); + swap(pDescriptor, numOfRows, midIdx, data, end); + } else if (compareFn(pDescriptor, numOfRows, start, end, data) == 1) { + swap(pDescriptor, numOfRows, start, data, end); + } + + assert(compareFn(pDescriptor, numOfRows, midIdx, start, data) <= 0 && + compareFn(pDescriptor, numOfRows, start, end, data) <= 0); + +#if defined(_DEBUG_VIEW) + midx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, midIdx, f); + startx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, start, f); + endx = COLMODEL_GET_VAL(data, pDescriptor->pSchema, numOfRows, end, f); + tSortDataPrint(pDescriptor->pSchema->pFields[colIdx].type, "after", startx, midx, endx); +#endif +} + +static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t numOfRows, char *d, int32_t len) { + int32_t colIdx = pDescriptor->orderIdx.pData[0]; + + for (int32_t i = 0; i < len; ++i) { + char *startx = COLMODEL_GET_VAL(d, pDescriptor->pSchema, numOfRows, i, colIdx); + + switch (pDescriptor->pSchema->pFields[colIdx].type) { + case TSDB_DATA_TYPE_DOUBLE: + printf("%lf\t", *(double *)startx); + break; + case TSDB_DATA_TYPE_FLOAT: + printf("%f\t", *(float *)startx); + break; + case TSDB_DATA_TYPE_INT: + printf("%d\t", *(int32_t *)startx); + break; + case TSDB_DATA_TYPE_SMALLINT: + printf("%d\t", *(int16_t *)startx); + break; + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_BOOL: + printf("%d\t", *(int8_t *)startx); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + printf("%lld\t", *(int64_t *)startx); + break; + case TSDB_DATA_TYPE_BINARY: + printf("%s\t", startx); + break; + default: + assert(false); + } + } + printf("\n"); +} + +static int32_t qsort_call = 0; + +void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t start, int32_t end, char *data, + int32_t orderType) { + 
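+  // Overall scheme: median-of-three pivot selection, a fat-pivot partition that
+  // parks keys equal to the pivot at both ends and then regroups them beside it,
+  // an insertion-sort cutoff for runs of eight or fewer rows, and recursion on
+  // the two remaining partitions.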
// short array sort, incur another sort procedure instead of quick sort process + __col_compar_fn_t compareFn = (orderType == TSQL_SO_ASC) ? compare_sa : compare_sd; + + if (end - start + 1 <= 8) { + tColDataInsertSort(pDescriptor, numOfRows, start, end, data, compareFn); + return; + } + +#ifdef _DEBUG_VIEW + printf("before sort:\n"); + tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +#endif + + int32_t s = start, e = end; + median(pDescriptor, numOfRows, start, end, data, compareFn); + +#ifdef _DEBUG_VIEW + printf("%s called: %d\n", __FUNCTION__, qsort_call++); +#endif + + UNUSED(qsort_call); + + int32_t end_same = end; + int32_t start_same = start; + + while (s < e) { + while (e > s) { + int32_t ret = compareFn(pDescriptor, numOfRows, e, s, data); + if (ret < 0) { + break; + } + + if (ret == 0 && e != end_same) { + swap(pDescriptor, numOfRows, e, data, end_same--); + } + e--; + } + + if (e != s) { + swap(pDescriptor, numOfRows, s, data, e); + } + +#ifdef _DEBUG_VIEW + tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +#endif + + while (s < e) { + int32_t ret = compareFn(pDescriptor, numOfRows, s, e, data); + if (ret > 0) { + break; + } + + if (ret == 0 && s != start_same) { + swap(pDescriptor, numOfRows, s, data, start_same++); + } + s++; + } + + if (s != e) { + swap(pDescriptor, numOfRows, s, data, e); + } +#ifdef _DEBUG_VIEW + tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +#endif + } + + int32_t rightx = e + 1; + if (end_same != end && e < end) { // move end data to around the pivotal + int32_t left = e + 1; + int32_t right = end; + + while (right > end_same && left <= end_same) { + swap(pDescriptor, numOfRows, left++, data, right--); + } + rightx += (end - end_same); // (pivotal+1) + steps of number that are identical pivotal + +#ifdef _DEBUG_VIEW + tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +#endif + } + + int32_t leftx = e - 1; + if (start_same != start && s > start) { + int32_t left = start; + int32_t right = e - 1; + + while (left < start_same && right >= start_same) { + swap(pDescriptor, numOfRows, left++, data, right--); + } + leftx -= (start_same - start); // (pivotal-1) - steps of number that are identical pivotal + +#ifdef _DEBUG_VIEW + tRowModelDisplay(pDescriptor, numOfRows, data, end - start + 1); +#endif + } + + if (leftx > start) { + tColDataQSort(pDescriptor, numOfRows, start, leftx, data, orderType); + } + + if (rightx < end) { + tColDataQSort(pDescriptor, numOfRows, rightx, end, data, orderType); + } +} + +tExtMemBuffer *releaseBucketsExceptFor(tMemBucket *pMemBucket, int16_t segIdx, int16_t slotIdx) { + tExtMemBuffer *pBuffer = NULL; + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + if (i == segIdx && j == slotIdx) { + pBuffer = pSeg->pBuffer[j]; + } else { + if (pSeg->pBuffer && pSeg->pBuffer[j]) { + tExtMemBufferDestroy(&pSeg->pBuffer[j]); + } + } + } + } + + return pBuffer; +} + +static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx, + tOrderDescriptor *pDesc) { + // release all data in other slots + tExtMemBuffer *pMemBuffer = pMemBucket->pSegs[segIdx].pBuffer[slotIdx]; + tFilePage * buffer = (tFilePage *)calloc(1, pMemBuffer->nElemSize * pMemBuffer->numOfAllElems + sizeof(tFilePage)); + int32_t oldCapacity = pDesc->pSchema->maxCapacity; + pDesc->pSchema->maxCapacity = pMemBuffer->numOfAllElems; + + if 
(!tExtMemBufferIsAllDataInMem(pMemBuffer)) { + pMemBuffer = releaseBucketsExceptFor(pMemBucket, segIdx, slotIdx); + assert(pMemBuffer->numOfAllElems > 0); + + // load data in disk to memory + tFilePage *pPage = (tFilePage *)calloc(1, pMemBuffer->nPageSize); + + for (int32_t i = 0; i < pMemBuffer->fileMeta.flushoutData.nLength; ++i) { + tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[i]; + + int32_t ret = fseek(pMemBuffer->dataFile, pFlushInfo->startPageId * pMemBuffer->nPageSize, SEEK_SET); + UNUSED(ret); + + for (uint32_t j = 0; j < pFlushInfo->numOfPages; ++j) { + ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + assert(pPage->numOfElems > 0); + + tColModelAppend(pDesc->pSchema, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems); + printf("id: %d count: %d\n", j, buffer->numOfElems); + } + } + tfree(pPage); + + assert(buffer->numOfElems == pMemBuffer->fileMeta.numOfElemsInFile); + } + + // load data in pMemBuffer to buffer + tFilePagesItem *pListItem = pMemBuffer->pHead; + while (pListItem != NULL) { + tColModelAppend(pDesc->pSchema, buffer, pListItem->item.data, 0, pListItem->item.numOfElems, + pListItem->item.numOfElems); + pListItem = pListItem->pNext; + } + + tColDataQSort(pDesc, buffer->numOfElems, 0, buffer->numOfElems - 1, buffer->data, TSQL_SO_ASC); + + pDesc->pSchema->maxCapacity = oldCapacity; // restore value + return buffer; +} + +double findOnlyResult(tMemBucket *pMemBucket) { + assert(pMemBucket->numOfElems == 1); + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + if (pSeg->pBuffer) { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + tExtMemBuffer *pBuffer = pSeg->pBuffer[j]; + if (pBuffer) { + assert(pBuffer->numOfAllElems == 1); + tFilePage *pPage = &pBuffer->pHead->item; + if (pBuffer->numOfElemsInBuffer == 1) { + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_INT: + return *(int32_t *)pPage->data; + case TSDB_DATA_TYPE_SMALLINT: + return *(int16_t *)pPage->data; + case TSDB_DATA_TYPE_TINYINT: + return *(int8_t *)pPage->data; + case TSDB_DATA_TYPE_BIGINT: + return (double)(*(int64_t *)pPage->data); + case TSDB_DATA_TYPE_DOUBLE: + return *(double *)pPage->data; + case TSDB_DATA_TYPE_FLOAT: + return *(float *)pPage->data; + default: + return 0; + } + } + } + } + } + } + return 0; +} + +/* + * deep copy of sschema + */ +tColModel *tColModelCreate(SSchema *field, int32_t numOfCols, int32_t maxCapacity) { + tColModel *pSchema = + (tColModel *)calloc(1, sizeof(tColModel) + numOfCols * sizeof(SSchema) + numOfCols * sizeof(int16_t)); + if (pSchema == NULL) { + return NULL; + } + + pSchema->pFields = (SSchema *)(&pSchema[1]); + memcpy(pSchema->pFields, field, sizeof(SSchema) * numOfCols); + + pSchema->colOffset = (int16_t *)(&pSchema->pFields[numOfCols]); + pSchema->colOffset[0] = 0; + for (int32_t i = 1; i < numOfCols; ++i) { + pSchema->colOffset[i] = pSchema->colOffset[i - 1] + pSchema->pFields[i - 1].bytes; + } + + pSchema->numOfCols = numOfCols; + pSchema->maxCapacity = maxCapacity; + + return pSchema; +} + +void tColModelDestroy(tColModel *pModel) { + if (pModel == NULL) { + return; + } + + tfree(pModel); +} + +static void printBinaryData(char *data, int32_t len) { + bool isCharString = true; + for (int32_t i = 0; i < len; ++i) { + if ((data[i] <= 'Z' && data[i] >= 'A') || (data[i] <= 'z' && data[i] >= 'a') || + (data[i] >= '0' && data[i] <= '9')) { + continue; + } else if (data[i] == 0) { + break; + } else { + isCharString = false; + break; + } + 
  }
+
+  if (len == 50) {  // probably the avg intermediate result
+    printf("%lf,%lld\t", *(double *)data, *(int64_t *)(data + sizeof(double)));
+  } else if (data[8] == ',') {  // in TSDB_FUNC_FIRST_DST/TSDB_FUNC_LAST_DST,
+                                // the value is separated by ','
+    printf("%lld,%s\t", *(int64_t *)data, data + sizeof(int64_t) + 1);
+  } else if (isCharString) {
+    printf("%s\t", data);
+  }
+}
+
+// todo cast to struct to extract data
+static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) {
+  if (param->functionId == TSDB_FUNC_LAST_DST) {
+    switch (param->type) {
+      case TSDB_DATA_TYPE_TINYINT:
+        printf("%lld,%d\t", *(int64_t *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1));
+        break;
+      case TSDB_DATA_TYPE_SMALLINT:
+        printf("%lld,%d\t", *(int64_t *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1));
+        break;
+      case TSDB_DATA_TYPE_TIMESTAMP:
+      case TSDB_DATA_TYPE_BIGINT:
+        printf("%lld,%lld\t", *(int64_t *)data, *(int64_t *)(data + TSDB_KEYSIZE + 1));
+        break;
+      case TSDB_DATA_TYPE_FLOAT:
+        printf("%lld,%f\t", *(int64_t *)data, *(float *)(data + TSDB_KEYSIZE + 1));
+        break;
+      case TSDB_DATA_TYPE_DOUBLE:
+        printf("%lld,%lf\t", *(int64_t *)data, *(double *)(data + TSDB_KEYSIZE + 1));
+        break;
+      case TSDB_DATA_TYPE_BINARY:
+        printf("%lld,%s\t", *(int64_t *)data, (data + TSDB_KEYSIZE + 1));
+        break;
+
+      case TSDB_DATA_TYPE_INT:
+      default:
+        printf("%lld,%d\t", *(int64_t *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1));
+        break;
+    }
+  } else {
+    // functionId is TSDB_FUNC_MAX_DST or TSDB_FUNC_TAG
+    switch (param->type) {
+      case TSDB_DATA_TYPE_TINYINT:
+        printf("%d\t", *(int8_t *)data);
+        break;
+      case TSDB_DATA_TYPE_SMALLINT:
+        printf("%d\t", *(int16_t *)data);
+        break;
+      case TSDB_DATA_TYPE_TIMESTAMP:
+      case TSDB_DATA_TYPE_BIGINT:
+        printf("%lld\t", *(int64_t *)data);
+        break;
+      case TSDB_DATA_TYPE_FLOAT:
+        printf("%f\t", *(float *)data);
+        break;
+      case TSDB_DATA_TYPE_DOUBLE:
+        printf("%lf\t", *(double *)data);
+        break;
+      case TSDB_DATA_TYPE_BINARY:
+        printf("%s\t", data);
+        break;
+
+      case TSDB_DATA_TYPE_INT:
+      default:
+        printf("%d\t", *(int32_t *)data);
+        break;
+    }
+  }
+}
+
+void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t totalCapacity) {
+  for (int32_t i = 0; i < numOfRows; ++i) {
+    for (int32_t j = 0; j < pModel->numOfCols; ++j) {
+      char *val = COLMODEL_GET_VAL((char *)pData, pModel, totalCapacity, i, j);
+
+      printf("type:%d\t", pModel->pFields[j].type);
+
+      switch (pModel->pFields[j].type) {
+        case TSDB_DATA_TYPE_BIGINT:
+          printf("%lld\t", *(int64_t *)val);
+          break;
+        case TSDB_DATA_TYPE_INT:
+          printf("%d\t", *(int32_t *)val);
+          break;
+        case TSDB_DATA_TYPE_NCHAR: {
+          char buf[4096] = {0};
+          taosUcs4ToMbs(val, pModel->pFields[j].bytes, buf);
+          printf("%s\t", buf);
+          break;
+        }
+        case TSDB_DATA_TYPE_BINARY: {
+          printBinaryData(val, pModel->pFields[j].bytes);
+          break;
+        }
+        case TSDB_DATA_TYPE_DOUBLE:
+          printf("%lf\t", *(double *)val);
+          break;
+        case TSDB_DATA_TYPE_TIMESTAMP:
+          printf("%lld\t", *(int64_t *)val);
+          break;
+        case TSDB_DATA_TYPE_TINYINT:
+          printf("%d\t", *(int8_t *)val);
+          break;
+        case TSDB_DATA_TYPE_SMALLINT:
+          printf("%d\t", *(int16_t *)val);
+          break;
+        case TSDB_DATA_TYPE_BOOL:
+          printf("%d\t", *(int8_t *)val);
+          break;
+        case TSDB_DATA_TYPE_FLOAT:
+          printf("%f\t", *(float *)val);
+          break;
+        default:
+          assert(false);
+      }
+    }
+    printf("\n");
+  }
+  printf("\n");
+}
+
+void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32_t totalCapacity,
+                        SSrcColumnInfo *param) {
+  for (int32_t i = 0; i < numOfRows; ++i) {
+    for (int32_t j = 0; j < pModel->numOfCols; ++j) {
+      char *val = COLMODEL_GET_VAL((char *)pData, pModel, totalCapacity, i, j);
+
+      printf("type:%d\t", pModel->pFields[j].type);
+
+      switch (pModel->pFields[j].type) {
+        case TSDB_DATA_TYPE_BIGINT:
+          printf("%lld\t", *(int64_t *)val);
+          break;
+        case TSDB_DATA_TYPE_INT:
+          printf("%d\t", *(int32_t *)val);
+          break;
+        case TSDB_DATA_TYPE_NCHAR: {
+          char buf[128] = {0};
+          taosUcs4ToMbs(val, pModel->pFields[j].bytes, buf);
+          printf("%s\t", buf);
+          break;
+        }
+        case TSDB_DATA_TYPE_BINARY: {
+          printBinaryDataEx(val, pModel->pFields[j].bytes, &param[j]);
+          break;
+        }
+        case TSDB_DATA_TYPE_DOUBLE:
+          printf("%lf\t", *(double *)val);
+          break;
+        case TSDB_DATA_TYPE_TIMESTAMP:
+          printf("%lld\t", *(int64_t *)val);
+          break;
+        case TSDB_DATA_TYPE_TINYINT:
+          printf("%d\t", *(int8_t *)val);
+          break;
+        case TSDB_DATA_TYPE_SMALLINT:
+          printf("%d\t", *(int16_t *)val);
+          break;
+        case TSDB_DATA_TYPE_BOOL:
+          printf("%d\t", *(int8_t *)val);
+          break;
+        case TSDB_DATA_TYPE_FLOAT:
+          printf("%f\t", *(float *)val);
+          break;
+        default:
+          assert(false);
+      }
+    }
+    printf("\n");
+  }
+  printf("\n");
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+void tColModelCompress(tColModel *pModel, tFilePage *inputBuffer, int32_t maxElemsCapacity) {
+  if (inputBuffer->numOfElems == 0 || maxElemsCapacity == inputBuffer->numOfElems) {
+    return;
+  }
+
+  /* start from the second column */
+  for (int32_t m = 1; m < pModel->numOfCols; ++m) {
+    memmove(inputBuffer->data + pModel->colOffset[m] * inputBuffer->numOfElems,
+            inputBuffer->data + pModel->colOffset[m] * maxElemsCapacity,
+            pModel->pFields[m].bytes * inputBuffer->numOfElems);
+  }
+}
+
+void tColModelErase(tColModel *pModel, tFilePage *inputBuffer, int32_t maxCapacity, int32_t s, int32_t e) {
+  if (inputBuffer->numOfElems == 0 || (e - s + 1) <= 0) {
+    return;
+  }
+
+  int32_t removed = e - s + 1;
+  int32_t remain = inputBuffer->numOfElems - removed;
+  int32_t secPart = inputBuffer->numOfElems - e - 1;
+
+  /* erase rows [s, e] from every column */
+  for (int32_t m = 0; m < pModel->numOfCols; ++m) {
+    char *startPos = inputBuffer->data + pModel->colOffset[m] * maxCapacity + s * pModel->pFields[m].bytes;
+    char *endPos = startPos + pModel->pFields[m].bytes * removed;
+
+    memmove(startPos, endPos, pModel->pFields[m].bytes * secPart);
+  }
+
+  inputBuffer->numOfElems = remain;
+}
+
+/*
+ * column format data block append function
+ * used to write record(s) into an existing column-format block
+ *
+ * data in srcData must have the same schema as the data in dstPage,
+ * which is described by dstModel
+ */
+void tColModelAppend(tColModel *dstModel, tFilePage *dstPage, void *srcData, int32_t start, int32_t numOfRows,
+                     int32_t srcCapacity) {
+  assert(dstPage->numOfElems + numOfRows <= dstModel->maxCapacity);
+
+  for (int32_t col = 0; col < dstModel->numOfCols; ++col) {
+    char *dst = COLMODEL_GET_VAL(dstPage->data, dstModel, dstModel->maxCapacity, dstPage->numOfElems, col);
+    char *src = COLMODEL_GET_VAL((char *)srcData, dstModel, srcCapacity, start, col);
+
+    memmove(dst, src, dstModel->pFields[col].bytes * numOfRows);
+  }
+
+  dstPage->numOfElems += numOfRows;
+}
+
+tOrderDescriptor *tOrderDesCreate(int32_t *orderColIdx, int32_t numOfOrderCols, tColModel *pModel,
+                                  int32_t tsOrderType) {
+  tOrderDescriptor *desc = (tOrderDescriptor *)malloc(sizeof(tOrderDescriptor) + sizeof(int32_t) * numOfOrderCols);
+  if (desc == NULL) {
+    return NULL;
+  }
+
+  desc->pSchema = pModel;
+  desc->tsOrder = tsOrderType;
+
+  desc->orderIdx.numOfOrderedCols =
numOfOrderCols; + for (int32_t i = 0; i < numOfOrderCols; ++i) { + desc->orderIdx.pData[i] = orderColIdx[i]; + } + + return desc; +} + +void tOrderDescDestroy(tOrderDescriptor *pDesc) { + if (pDesc == NULL) { + return; + } + + tColModelDestroy(pDesc->pSchema); + tfree(pDesc); +} + +//////////////////////////////////////////////////////////////////////////////////////////// +static void findMaxMinValue(tMemBucket *pMemBucket, double *maxVal, double *minVal) { + *minVal = DBL_MAX; + *maxVal = -DBL_MAX; + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + if (pSeg->pBuffer == NULL) { + continue; + } + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + double minv = pSeg->pBoundingEntries[j].iMinVal; + double maxv = pSeg->pBoundingEntries[j].iMaxVal; + + if (*minVal > minv) { + *minVal = minv; + } + if (*maxVal < maxv) { + *maxVal = maxv; + } + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_FLOAT: { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + double minv = pSeg->pBoundingEntries[j].dMinVal; + double maxv = pSeg->pBoundingEntries[j].dMaxVal; + + if (*minVal > minv) { + *minVal = minv; + } + if (*maxVal < maxv) { + *maxVal = maxv; + } + } + break; + } + case TSDB_DATA_TYPE_BIGINT: { + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + double minv = (double)pSeg->pBoundingEntries[j].i64MinVal; + double maxv = (double)pSeg->pBoundingEntries[j].i64MaxVal; + + if (*minVal > minv) { + *minVal = minv; + } + if (*maxVal < maxv) { + *maxVal = maxv; + } + } + break; + } + } + } +} + +static MinMaxEntry getMinMaxEntryOfNearestSlotInNextSegment(tMemBucket *pMemBucket, int32_t segIdx) { + int32_t i = segIdx + 1; + while (i < pMemBucket->numOfSegs && pMemBucket->pSegs[i].numOfSlots == 0) ++i; + + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + assert(pMemBucket->numOfSegs > i && pMemBucket->pSegs[i].pBuffer != NULL); + + i = 0; + while (i < pMemBucket->nSlotsOfSeg && pSeg->pBuffer[i] == NULL) ++i; + + assert(i < pMemBucket->nSlotsOfSeg); + return pSeg->pBoundingEntries[i]; +} + +/* + * + * now, we need to find the minimum value of the next slot for + * interpolating the percentile value + * j is the last slot of current segment, we need to get the first + * slot of the next segment. 
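+ * The percentile is then linearly interpolated between the two slots:
+ *   val = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot
+ * e.g. with fraction = 0.25, maxOfThisSlot = 10 and minOfNextSlot = 20 the
+ * result is 0.75 * 10 + 0.25 * 20 = 12.5.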
+ */ +static MinMaxEntry getMinMaxEntryOfNextSlotWithData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; + + MinMaxEntry next; + if (slotIdx == pSeg->numOfSlots - 1) { // find next segment with data + return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx); + } else { + int32_t j = slotIdx + 1; + for (; j < pMemBucket->nSlotsOfSeg && pMemBucket->pSegs[segIdx].pBuffer[j] == 0; ++j) { + }; + + if (j == pMemBucket->nSlotsOfSeg) { // current slot has no available + // slot,try next segment + return getMinMaxEntryOfNearestSlotInNextSegment(pMemBucket, segIdx); + } else { + next = pSeg->pBoundingEntries[slotIdx + 1]; + assert(pSeg->pBuffer[slotIdx + 1] != NULL); + } + } + + return next; +} + +bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx); +char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage); + +double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) { + int32_t num = 0; + + for (int32_t i = 0; i < pMemBucket->numOfSegs; ++i) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[i]; + for (int32_t j = 0; j < pSeg->numOfSlots; ++j) { + if (pSeg->pBuffer == NULL || pSeg->pBuffer[j] == NULL) { + continue; + } + // required value in current slot + if (num < (count + 1) && num + pSeg->pBuffer[j]->numOfAllElems >= (count + 1)) { + if (pSeg->pBuffer[j]->numOfAllElems + num == (count + 1)) { + /* + * now, we need to find the minimum value of the next slot for interpolating the percentile value + * j is the last slot of current segment, we need to get the first slot of the next segment. + * + */ + MinMaxEntry next = getMinMaxEntryOfNextSlotWithData(pMemBucket, i, j); + + double maxOfThisSlot = 0; + double minOfNextSlot = 0; + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: { + maxOfThisSlot = pSeg->pBoundingEntries[j].iMaxVal; + minOfNextSlot = next.iMinVal; + break; + }; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + maxOfThisSlot = pSeg->pBoundingEntries[j].dMaxVal; + minOfNextSlot = next.dMinVal; + break; + }; + case TSDB_DATA_TYPE_BIGINT: { + maxOfThisSlot = (double)pSeg->pBoundingEntries[j].i64MaxVal; + minOfNextSlot = (double)next.i64MinVal; + break; + } + }; + + assert(minOfNextSlot > maxOfThisSlot); + + double val = (1 - fraction) * maxOfThisSlot + fraction * minOfNextSlot; + return val; + } + if (pSeg->pBuffer[j]->numOfAllElems <= pMemBucket->maxElemsCapacity) { + // data in buffer and file are merged together to be processed. 
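+          // The slot's rows (both the in-memory pages and any pages flushed to disk) are
+          // loaded into a single buffer, sorted ascending, and the result is interpolated
+          // between the element at position (count - num) and its successor.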
+ tFilePage *buffer = loadIntoBucketFromDisk(pMemBucket, i, j, pMemBucket->pOrderDesc); + int32_t currentIdx = count - num; + + char * thisVal = buffer->data + pMemBucket->nElemSize * currentIdx; + char * nextVal = thisVal + pMemBucket->nElemSize; + double td, nd; + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_SMALLINT: { + td = *(int16_t *)thisVal; + nd = *(int16_t *)nextVal; + break; + } + case TSDB_DATA_TYPE_TINYINT: { + td = *(int8_t *)thisVal; + nd = *(int8_t *)nextVal; + break; + } + case TSDB_DATA_TYPE_INT: { + td = *(int32_t *)thisVal; + nd = *(int32_t *)nextVal; + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + td = *(float *)thisVal; + nd = *(float *)nextVal; + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + td = *(double *)thisVal; + nd = *(double *)nextVal; + break; + } + case TSDB_DATA_TYPE_BIGINT: { + td = (double)*(int64_t *)thisVal; + nd = (double)*(int64_t *)nextVal; + break; + } + } + double val = (1 - fraction) * td + fraction * nd; + tfree(buffer); + + return val; + } else { // incur a second round bucket split + if (isIdenticalData(pMemBucket, i, j)) { + tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j]; + + tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->nPageSize); + + char *thisVal = getFirstElemOfMemBuffer(pSeg, j, pPage); + + double finalResult = 0.0; + + switch (pMemBucket->dataType) { + case TSDB_DATA_TYPE_SMALLINT: { + finalResult = *(int16_t *)thisVal; + break; + } + case TSDB_DATA_TYPE_TINYINT: { + finalResult = *(int8_t *)thisVal; + break; + } + case TSDB_DATA_TYPE_INT: { + finalResult = *(int32_t *)thisVal; + break; + }; + case TSDB_DATA_TYPE_FLOAT: { + finalResult = *(float *)thisVal; + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + finalResult = *(double *)thisVal; + break; + } + case TSDB_DATA_TYPE_BIGINT: { + finalResult = (double)*(int64_t *)thisVal; + break; + } + } + + free(pPage); + return finalResult; + } + + pTrace("MemBucket:%p,start second round bucketing", pMemBucket); + + if (pSeg->pBuffer[j]->numOfElemsInBuffer != 0) { + pTrace("MemBucket:%p,flush %d pages to disk, clear status", pMemBucket, pSeg->pBuffer[j]->numOfPagesInMem); + + pMemBucket->numOfAvailPages += pSeg->pBuffer[j]->numOfPagesInMem; + tExtMemBufferFlush(pSeg->pBuffer[j]); + } + + tExtMemBuffer *pMemBuffer = pSeg->pBuffer[j]; + pSeg->pBuffer[j] = NULL; + + // release all + for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt]; + for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) { + if (pSeg->pBuffer && pSeg->pBuffer[ttx]) { + tExtMemBufferDestroy(&pSeg->pBuffer[ttx]); + } + } + } + + pMemBucket->nRange.i64MaxVal = pSeg->pBoundingEntries->i64MaxVal; + pMemBucket->nRange.i64MinVal = pSeg->pBoundingEntries->i64MinVal; + pMemBucket->numOfElems = 0; + + for (int32_t tt = 0; tt < pMemBucket->numOfSegs; ++tt) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[tt]; + for (int32_t ttx = 0; ttx < pSeg->numOfSlots; ++ttx) { + if (pSeg->pBoundingEntries) { + resetBoundingBox(pSeg, pMemBucket->dataType); + } + } + } + + tFilePage *pPage = (tFilePage *)malloc(pMemBuffer->nPageSize); + + tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0]; + assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize); + + int32_t ret = fseek(pMemBuffer->dataFile, pFlushInfo->startPageId * pMemBuffer->nPageSize, SEEK_SET); + UNUSED(ret); + + for (uint32_t jx = 0; jx < pFlushInfo->numOfPages; ++jx) { + ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + tMemBucketPut(pMemBucket, pPage->data, pPage->numOfElems); 
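+            // each re-read page is pushed back through tMemBucketPut, which re-buckets its rows
+            // using the narrowed [min, max] range set above; the finer slot resolution is then
+            // exploited by the recursive getPercentileImpl() call below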
+ } + + fclose(pMemBuffer->dataFile); + if (unlink(pMemBuffer->dataFilePath) != 0) { + pError("MemBucket:%p,remove tmp file %s failed", pMemBucket, pMemBuffer->dataFilePath); + } + tfree(pMemBuffer); + tfree(pPage); + + return getPercentileImpl(pMemBucket, count - num, fraction); + } + } else { + num += pSeg->pBuffer[j]->numOfAllElems; + } + } + } + return 0; +} + +double getPercentile(tMemBucket *pMemBucket, double percent) { + if (pMemBucket->numOfElems == 0) { + return 0.0; + } + + if (pMemBucket->numOfElems == 1) { // return the only element + return findOnlyResult(pMemBucket); + } + + percent = fabs(percent); + + // validate the parameters + if (fabs(percent - 100.0) < DBL_EPSILON || (percent < DBL_EPSILON)) { + double minx = 0, maxx = 0; + /* + * find the min/max value, no need to scan all data in bucket + */ + findMaxMinValue(pMemBucket, &maxx, &minx); + + return fabs(percent - 100) < DBL_EPSILON ? maxx : minx; + } + + double percentVal = (percent * (pMemBucket->numOfElems - 1)) / ((double)100.0); + int32_t orderIdx = (int32_t)percentVal; + + // do put data by using buckets + return getPercentileImpl(pMemBucket, orderIdx, percentVal - orderIdx); +} + +/* + * check if data in one slot are all identical + * only need to compare with the bounding box + */ +bool isIdenticalData(tMemBucket *pMemBucket, int32_t segIdx, int32_t slotIdx) { + tMemBucketSegment *pSeg = &pMemBucket->pSegs[segIdx]; + + if (pMemBucket->dataType == TSDB_DATA_TYPE_INT || pMemBucket->dataType == TSDB_DATA_TYPE_BIGINT || + pMemBucket->dataType == TSDB_DATA_TYPE_SMALLINT || pMemBucket->dataType == TSDB_DATA_TYPE_TINYINT) { + return pSeg->pBoundingEntries[slotIdx].i64MinVal == pSeg->pBoundingEntries[slotIdx].i64MaxVal; + } + + if (pMemBucket->dataType == TSDB_DATA_TYPE_FLOAT || pMemBucket->dataType == TSDB_DATA_TYPE_DOUBLE) { + return fabs(pSeg->pBoundingEntries[slotIdx].dMaxVal - pSeg->pBoundingEntries[slotIdx].dMinVal) < DBL_EPSILON; + } + + return false; +} + +/* + * get the first element of one slot into memory. + * if no data of current slot in memory, load it from disk + */ +char *getFirstElemOfMemBuffer(tMemBucketSegment *pSeg, int32_t slotIdx, tFilePage *pPage) { + tExtMemBuffer *pMemBuffer = pSeg->pBuffer[slotIdx]; + char * thisVal = NULL; + + if (pSeg->pBuffer[slotIdx]->numOfElemsInBuffer != 0) { + thisVal = pSeg->pBuffer[slotIdx]->pHead->item.data; + } else { + /* + * no data in memory, load one page into memory + */ + tFlushoutInfo *pFlushInfo = &pMemBuffer->fileMeta.flushoutData.pFlushoutInfo[0]; + assert(pFlushInfo->numOfPages == pMemBuffer->fileMeta.nFileSize); + + fseek(pMemBuffer->dataFile, pFlushInfo->startPageId * pMemBuffer->nPageSize, SEEK_SET); + size_t ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + UNUSED(ret); + thisVal = pPage->data; + } + return thisVal; +} diff --git a/src/util/src/tglobalcfg.c b/src/util/src/tglobalcfg.c new file mode 100644 index 000000000000..6196a1d31c55 --- /dev/null +++ b/src/util/src/tglobalcfg.c @@ -0,0 +1,859 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tglobalcfg.h" +#include "tkey.h" +#include "tlog.h" +#include "tsdb.h" +#include "tsocket.h" +#include "tsystem.h" +#include "tutil.h" + +// system info, not configurable +int64_t tsPageSize; +int64_t tsOpenMax; +int64_t tsStreamMax; +int32_t tsNumOfCores; +int32_t tsTotalDiskGB; +int32_t tsTotalMemoryMB; +int32_t tsVersion = 0; + +// global, not configurable +int tscEmbedded = 0; + +/* + * minmum scale for whole system, millisecond by default + * for TSDB_TIME_PRECISION_MILLI: 86400000L + * TSDB_TIME_PRECISION_MICRO: 86400000000L + */ +int64_t tsMsPerDay[] = {86400000L, 86400000000L}; + +short tsMgmtShellPort = 6030; // udp[6030-6034] tcp[6030] +short tsVnodeShellPort = 6035; // udp[6035-6039] tcp[6035] + +int tsStatusInterval = 1; // second +int tsShellActivityTimer = 3; // second +int tsVnodePeerHBTimer = 1; // second +int tsMgmtPeerHBTimer = 1; // second +int tsMeterMetaKeepTimer = 7200; // second +int tsMetricMetaKeepTimer = 600; // second + +float tsNumOfThreadsPerCore = 1.0; +float tsRatioOfQueryThreads = 0.5; +char tsInternalIp[TSDB_IPv4ADDR_LEN] = {0}; +int tsNumOfVnodesPerCore = 8; +int tsNumOfTotalVnodes = 0; + +int tsSessionsPerVnode = 1000; +int tsCacheBlockSize = 16384; // 256 columns +int tsAverageCacheBlocks = 4; + +int tsRowsInFileBlock = 4096; +float tsFileBlockMinPercent = 0.25; + +short tsNumOfBlocksPerMeter = 100; +int tsCommitTime = 3600; // seconds +int tsCommitLog = 1; +int tsCompression = 2; +int tsDaysPerFile = 10; +int tsDaysToKeep = 3650; + +int tsMaxShellConns = 2000; +int tsMaxUsers = 1000; +int tsMaxDbs = 1000; +int tsMaxTables = 650000; +int tsMaxDnodes = 1000; +int tsMaxVGroups = 1000; + +char tsLocalIp[TSDB_IPv4ADDR_LEN] = "0.0.0.0"; +char tsDefaultDB[TSDB_DB_NAME_LEN] = {0}; +char tsDefaultUser[64] = "root"; +char tsDefaultPass[64] = "taosdata"; +int tsMaxMeterConnections = 10000; +int tsMaxMgmtConnections = 2000; +int tsMaxVnodeConnections = 10000; + +int tsEnableHttpModule = 1; +int tsEnableMonitorModule = 1; + +int tsTimePrecision = TSDB_TIME_PRECISION_MILLI; // time precision, millisecond by default +int tsMinSlidingTime = 10; // 10 ms for sliding time, the value will changed in + // case of time precision changed +int tsMinIntervalTime = 10; // 10 ms for interval time range, changed accordingly +int tsMaxStreamComputDelay = 20000; // 20sec, the maximum value of stream + // computing delay, changed accordingly +int tsStreamCompStartDelay = 10000; // 10sec, the first stream computing delay + // time after system launched successfully, + // changed accordingly +int tsStreamCompRetryDelay = 10; // the stream computing delay time after + // executing failed, change accordingly + +int tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once +int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance + +char tsHttpIp[TSDB_IPv4ADDR_LEN] = "0.0.0.0"; +short tsHttpPort = 6020; // only tcp, range tcp[6020] +// short tsNginxPort = 6060; //only tcp, range tcp[6060] +int tsHttpCacheSessions = 2000; +int tsHttpSessionExpire = 36000; +int tsHttpMaxThreads = 2; +int tsHttpEnableCompress = 0; +int tsAdminRowLimit = 10240; + +char tsMonitorDbName[] = "log"; +int tsMonitorInterval = 30; // seconds +char tsInternalPass[] = "secretkey"; + +char tsTimezone[64] = {0}; +char 
tsLocale[TSDB_LOCALE_LEN] = {0}; +char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string + +int tsNumOfLogLines = 10000000; +int ddebugFlag = 131; +int mdebugFlag = 135; +int sdbDebugFlag = 135; +int cdebugFlag = 131; +int jnidebugFlag = 131; +int httpDebugFlag = 131; +int monitorDebugFlag = 131; +int debugFlag = 131; +int odbcdebugFlag = 131; +int qdebugFlag = 131; + +SGlobalConfig *tsGlobalConfig = NULL; +int tsGlobalConfigNum = 0; + +char *tsGlobalUnit[] = { + " ", "(%)", "(GB)", "(MB)", "(Mb)", "(byte)", "(s)", "(ms)", +}; + +char *tsCfgStatusStr[] = {"none", "system default", "config file", "taos_options", "program argument list"}; + +void tsReadFloatConfig(SGlobalConfig *cfg, char *input_value) { + float value = (float)atof(input_value); + float *option = (float *)cfg->ptr; + if (value < cfg->minValue || value > cfg->maxValue) { + pError("config option:%s, input value:%s, out of range[%f, %f], use default value:%f", + cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + } else { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_FILE) { + *option = value; + cfg->cfgStatus = TSDB_CFG_CSTATUS_FILE; + } else { + pWarn("config option:%s, input value:%s, is configured by %s, use %f", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], *option); + } + } +} + +void tsReadIntConfig(SGlobalConfig *cfg, char *input_value) { + int32_t value = atoi(input_value); + int32_t *option = (int32_t *)cfg->ptr; + if (value < cfg->minValue || value > cfg->maxValue) { + pError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d", + cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + } else { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_FILE) { + *option = value; + cfg->cfgStatus = TSDB_CFG_CSTATUS_FILE; + } else { + pWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], *option); + } + } +} + +void tsReadUIntConfig(SGlobalConfig *cfg, char *input_value) { + uint32_t value = (uint32_t)atoi(input_value); + uint32_t *option = (uint32_t *)cfg->ptr; + if (value < (uint32_t)cfg->minValue || value > (uint32_t)cfg->maxValue) { + pError("config option:%s, input value:%s, out of range[%f, %f], use default value:%u", + cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + } else { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_FILE) { + *option = value; + cfg->cfgStatus = TSDB_CFG_CSTATUS_FILE; + } else { + pWarn("config option:%s, input value:%s, is configured by %s, use %u", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], *option); + } + } +} + +void tsReadShortConfig(SGlobalConfig *cfg, char *input_value) { + int32_t value = atoi(input_value); + int16_t *option = (int16_t *)cfg->ptr; + if (value < cfg->minValue || value > cfg->maxValue) { + pError("config option:%s, input value:%s, out of range[%f, %f], use default value:%d", + cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + } else { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_FILE) { + *option = (int16_t)value; + cfg->cfgStatus = TSDB_CFG_CSTATUS_FILE; + } else { + pWarn("config option:%s, input value:%s, is configured by %s, use %d", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], *option); + } + } +} + +void tsReadFilePathConfig(SGlobalConfig *cfg, char *input_value) { + int length = strlen(input_value); + char *option = (char *)cfg->ptr; + if (length <= 0 || length > cfg->ptrLength) { + pError("config option:%s, input value:%s, length out of range[0, %d], use default value:%s", 
+ cfg->option, input_value, cfg->ptrLength, option); + } else { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_FILE) { + wordexp_t full_path; + wordexp(input_value, &full_path, 0); + if (full_path.we_wordv != NULL && full_path.we_wordv[0] != NULL) { + strcpy(option, full_path.we_wordv[0]); + } + wordfree(&full_path); + + struct stat dirstat; + if (stat(option, &dirstat) < 0) { + int code = mkdir(option, 0755); + pPrint("config option:%s, input value:%s, directory not exist, create with return code:%d", + cfg->option, input_value, code); + } + cfg->cfgStatus = TSDB_CFG_CSTATUS_FILE; + } else { + pWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], option); + } + } +} + +void tsReadIpConfig(SGlobalConfig *cfg, char *input_value) { + uint32_t value = inet_addr(input_value); + char * option = (char *)cfg->ptr; + if (value == INADDR_NONE) { + pError("config option:%s, input value:%s, is not a valid ip address, use default value:%s", + cfg->option, input_value, option); + } else { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_FILE) { + strncpy(option, input_value, cfg->ptrLength); + cfg->cfgStatus = TSDB_CFG_CSTATUS_FILE; + } else { + pWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], option); + } + } +} + +void tsReadStrConfig(SGlobalConfig *cfg, char *input_value) { + int length = strlen(input_value); + char *option = (char *)cfg->ptr; + if (length <= 0 || length > cfg->ptrLength) { + pError("config option:%s, input value:%s, length out of range[0, %d], use default value:%s", + cfg->option, input_value, cfg->ptrLength, option); + } else { + if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_FILE) { + strncpy(option, input_value, cfg->ptrLength); + cfg->cfgStatus = TSDB_CFG_CSTATUS_FILE; + } else { + pWarn("config option:%s, input value:%s, is configured by %s, use %s", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], option); + } + } +} + +void tsReadLogOption(char *option, char *value) { + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalConfig *cfg = tsGlobalConfig + i; + if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_CONFIG) || !(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; + if (strcasecmp(cfg->option, option) != 0) continue; + + switch (cfg->valType) { + case TSDB_CFG_VTYPE_INT: + tsReadIntConfig(cfg, value); + if (strcasecmp(cfg->option, "debugFlag") == 0) { + tsSetAllDebugFlag(); + } + break; + case TSDB_CFG_VTYPE_DIRECTORY: + tsReadFilePathConfig(cfg, value); + break; + default: + break; + } + break; + } +} + +SGlobalConfig *tsGetConfigOption(char *option) { + tsInitGlobalConfig(); + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalConfig *cfg = tsGlobalConfig + i; + if (strcasecmp(cfg->option, option) != 0) continue; + return cfg; + } + return NULL; +} + +void tsReadConfigOption(char *option, char *value) { + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalConfig *cfg = tsGlobalConfig + i; + if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_CONFIG)) continue; + if (strcasecmp(cfg->option, option) != 0) continue; + + switch (cfg->valType) { + case TSDB_CFG_VTYPE_SHORT: + tsReadShortConfig(cfg, value); + break; + case TSDB_CFG_VTYPE_INT: + tsReadIntConfig(cfg, value); + break; + case TSDB_CFG_VTYPE_UINT: + tsReadUIntConfig(cfg, value); + break; + case TSDB_CFG_VTYPE_FLOAT: + tsReadFloatConfig(cfg, value); + break; + case TSDB_CFG_VTYPE_STRING: + tsReadStrConfig(cfg, value); + break; + case TSDB_CFG_VTYPE_IPSTR: + tsReadIpConfig(cfg, value); + 
break; + case TSDB_CFG_VTYPE_DIRECTORY: + tsReadFilePathConfig(cfg, value); + break; + default: + pError("config option:%s, input value:%s, can't be recognized", option, value); + break; + } + break; + } +} + +void tsInitConfigOption(SGlobalConfig *cfg, char *name, void *ptr, int8_t valType, int8_t cfgType, float minVal, + float maxVal, uint8_t ptrLength, int8_t unitType) { + cfg->option = name; + cfg->ptr = ptr; + cfg->valType = valType; + cfg->cfgType = cfgType; + cfg->minValue = minVal; + cfg->maxValue = maxVal; + cfg->ptrLength = ptrLength; + cfg->unitType = unitType; + cfg->cfgStatus = TSDB_CFG_CSTATUS_NONE; +} + +void tsInitGlobalConfig() { + if (tsGlobalConfig != NULL) return; + + tsGlobalConfig = (SGlobalConfig *)malloc(sizeof(SGlobalConfig) * TSDB_CFG_MAX_NUM); + memset(tsGlobalConfig, 0, sizeof(SGlobalConfig) * TSDB_CFG_MAX_NUM); + + SGlobalConfig *cfg = tsGlobalConfig; + + // ip address + tsInitConfigOption(cfg++, "internalIp", tsInternalIp, TSDB_CFG_VTYPE_IPSTR, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "localIp", tsLocalIp, TSDB_CFG_VTYPE_IPSTR, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "httpIp", tsHttpIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG, 0, 0, TSDB_IPv4ADDR_LEN, + TSDB_CFG_UTYPE_NONE); + + // port + tsInitConfigOption(cfg++, "httpPort", &tsHttpPort, TSDB_CFG_VTYPE_SHORT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 65535, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "mgmtShellPort", &tsMgmtShellPort, TSDB_CFG_VTYPE_SHORT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, 1, 65535, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "vnodeShellPort", &tsVnodeShellPort, TSDB_CFG_VTYPE_SHORT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, 1, 65535, 0, + TSDB_CFG_UTYPE_NONE); + + // directory + tsInitConfigOption(cfg++, "dataDir", dataDir, TSDB_CFG_VTYPE_DIRECTORY, TSDB_CFG_CTYPE_B_CONFIG, 0, 0, + TSDB_FILENAME_LEN, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "logDir", logDir, TSDB_CFG_VTYPE_DIRECTORY, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_FILENAME_LEN, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "scriptDir", scriptDir, TSDB_CFG_VTYPE_DIRECTORY, TSDB_CFG_CTYPE_B_CONFIG, 0, 0, + TSDB_FILENAME_LEN, TSDB_CFG_UTYPE_NONE); + + // dnode configs + tsInitConfigOption(cfg++, "numOfThreadsPerCore", &tsNumOfThreadsPerCore, TSDB_CFG_VTYPE_FLOAT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 10, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "ratioOfQueryThreads", &tsRatioOfQueryThreads, TSDB_CFG_VTYPE_FLOAT, + TSDB_CFG_CTYPE_B_CONFIG, 0.1, 0.9, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "numOfVnodesPerCore", &tsNumOfVnodesPerCore, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 64, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "numOfTotalVnodes", &tsNumOfTotalVnodes, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, 0, + TSDB_MAX_VNODES, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "tables", &tsSessionsPerVnode, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 4, 220000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "cache", &tsCacheBlockSize, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 100, 1048576, 0, TSDB_CFG_UTYPE_BYTE); + tsInitConfigOption(cfg++, 
"rows", &tsRowsInFileBlock, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 200, 1048576, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "fileBlockMinPercent", &tsFileBlockMinPercent, TSDB_CFG_VTYPE_FLOAT, + TSDB_CFG_CTYPE_B_CONFIG, 0, 1.0, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "ablocks", &tsAverageCacheBlocks, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 2, 128, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "tblocks", &tsNumOfBlocksPerMeter, TSDB_CFG_VTYPE_SHORT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 32, 4096, 0, TSDB_CFG_UTYPE_NONE); + + // time + tsInitConfigOption(cfg++, "monitorInterval", &tsMonitorInterval, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, 1, 600, + 0, TSDB_CFG_UTYPE_SECOND); + tsInitConfigOption(cfg++, "rpcTimer", &tsRpcTimer, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 100, 3000, 0, TSDB_CFG_UTYPE_MS); + tsInitConfigOption(cfg++, "rpcMaxTime", &tsRpcMaxTime, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 100, 7200, 0, TSDB_CFG_UTYPE_SECOND); + tsInitConfigOption(cfg++, "ctime", &tsCommitTime, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 30, 40960, 0, TSDB_CFG_UTYPE_SECOND); + tsInitConfigOption(cfg++, "statusInterval", &tsStatusInterval, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 10, 0, TSDB_CFG_UTYPE_SECOND); + tsInitConfigOption(cfg++, "shellActivityTimer", &tsShellActivityTimer, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 1, 120, 0, TSDB_CFG_UTYPE_SECOND); + tsInitConfigOption(cfg++, "meterMetaKeepTimer", &tsMeterMetaKeepTimer, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 1, 36000, 0, TSDB_CFG_UTYPE_SECOND); + tsInitConfigOption(cfg++, "metricMetaKeepTimer", &tsMetricMetaKeepTimer, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 1, 36000, 0, TSDB_CFG_UTYPE_SECOND); + + // mgmt configs + tsInitConfigOption(cfg++, "maxUsers", &tsMaxUsers, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 1000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "maxDbs", &tsMaxDbs, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, + 10000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "maxTables", &tsMaxTables, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 100000000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "maxVGroups", &tsMaxVGroups, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 1000000, 0, TSDB_CFG_UTYPE_NONE); + + tsInitConfigOption(cfg++, "minSlidingTime", &tsMinSlidingTime, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 1000000, 0, TSDB_CFG_UTYPE_MS); + tsInitConfigOption(cfg++, "minIntervalTime", &tsMinIntervalTime, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 1000000, 0, TSDB_CFG_UTYPE_MS); + tsInitConfigOption(cfg++, "maxStreamCompDelay", &tsMaxStreamComputDelay, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 1000000000, 0, TSDB_CFG_UTYPE_MS); + tsInitConfigOption(cfg++, "maxFirstStreamCompDelay", &tsStreamCompStartDelay, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1000, 1000000000, 0, TSDB_CFG_UTYPE_MS); + tsInitConfigOption(cfg++, "retryStreamCompDelay", &tsStreamCompRetryDelay, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 1000000000, 
0, TSDB_CFG_UTYPE_MS); + + tsInitConfigOption(cfg++, "clog", &tsCommitLog, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 1, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "comp", &tsCompression, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 2, 0, TSDB_CFG_UTYPE_NONE); + + // database configs + tsInitConfigOption(cfg++, "days", &tsDaysPerFile, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 365, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "keep", &tsDaysToKeep, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 1, 365000, 0, TSDB_CFG_UTYPE_NONE); + + // login configs + tsInitConfigOption(cfg++, "defaultDB", tsDefaultDB, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_DB_NAME_LEN, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "defaultUser", tsDefaultUser, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_USER_LEN, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "defaultPass", tsDefaultPass, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_PASSWORD_LEN, TSDB_CFG_UTYPE_NONE); + + // locale & charset + tsInitConfigOption(cfg++, "timezone", tsTimezone, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, tListLen(tsTimezone), + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "locale", tsLocale, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, tListLen(tsLocale), TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "charset", tsCharset, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, tListLen(tsCharset), TSDB_CFG_UTYPE_NONE); + + // connect configs + tsInitConfigOption(cfg++, "maxShellConns", &tsMaxShellConns, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 50000000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "maxMeterConnections", &tsMaxMeterConnections, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 50000000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "maxMgmtConnections", &tsMaxMgmtConnections, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 50000000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "maxVnodeConnections", &tsMaxVnodeConnections, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 50000000, 0, TSDB_CFG_UTYPE_NONE); + + // module configs + tsInitConfigOption(cfg++, "enableHttp", &tsEnableHttpModule, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 1, 1, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "enableMonitor", &tsEnableMonitorModule, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0, 1, 1, TSDB_CFG_UTYPE_NONE); + + // http configs + tsInitConfigOption(cfg++, "httpCacheSessions", &tsHttpCacheSessions, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, 1, + 100000, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "httpMaxThreads", &tsHttpMaxThreads, TSDB_CFG_VTYPE_INT, TSDB_CFG_CTYPE_B_CONFIG, 1, + 1000000, 0, TSDB_CFG_UTYPE_NONE); + + // debug flag + tsInitConfigOption(cfg++, "numOfLogLines", &tsNumOfLogLines, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 10000, 2000000000, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "asyncLog", &tsAsyncLog, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | 
TSDB_CFG_CTYPE_B_CLIENT, 0, 1, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "debugFlag", &debugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "mDebugFlag", &mdebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG, 0, 255, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "dDebugFlag", &ddebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG, 0, 255, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "sdbDebugFlag", &sdbDebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG, 0, 255, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "taosDebugFlag", &taosDebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "tmrDebugFlag", &tmrDebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "cDebugFlag", &cdebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "jniDebugFlag", &jnidebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "odbcDebugFlag", &odbcdebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "uDebugFlag", &uDebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "httpDebugFlag", &httpDebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG, 0, 255, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "monitorDebugFlag", &monitorDebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG, 0, 255, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "qDebugFlag", &qdebugFlag, TSDB_CFG_VTYPE_INT, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT, 0, 255, 0, + TSDB_CFG_UTYPE_NONE); + + // version info + tsInitConfigOption(cfg++, "gitinfo", gitinfo, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, + 0, 0, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "buildinfo", buildinfo, TSDB_CFG_VTYPE_STRING, + TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, 0, TSDB_CFG_UTYPE_NONE); + tsInitConfigOption(cfg++, "version", version, TSDB_CFG_VTYPE_STRING, TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT, + 0, 0, 0, TSDB_CFG_UTYPE_NONE); + + tsGlobalConfigNum = (int)(cfg - tsGlobalConfig); +} + +void tsReadGlobalLogConfig() { + tsInitGlobalConfig(); + + FILE * fp; + char * line, *option, *value; + size_t len; + int olen, vlen; + char fileName[128]; + + mdebugFlag = 135; + sdbDebugFlag = 135; + + wordexp_t full_path; + wordexp(configDir, &full_path, 0); + if (full_path.we_wordv != NULL && full_path.we_wordv[0] != NULL) { + strcpy(configDir, full_path.we_wordv[0]); + } else { + strcpy(configDir, "/etc/taos"); + printf("configDir:%s not there, use default value: /etc/taos", configDir); + } + wordfree(&full_path); + + sprintf(fileName, "%s/taos.cfg", configDir); + fp = fopen(fileName, "r"); + if (fp == NULL) { + 
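    // config file is missing: keep the compiled-in defaults, but still apply the
+    // logDir option so the log module has a valid target directory
+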
printf("option file:%s not found, all options are set to system default\n", fileName); + tsReadLogOption("logDir", logDir); + return; + } + + line = NULL; + while (!feof(fp)) { + tfree(line); + line = option = value = NULL; + len = olen = vlen = 0; + + getline(&line, &len, fp); + if (line == NULL) break; + + paGetToken(line, &option, &olen); + if (olen == 0) continue; + option[olen] = 0; + + paGetToken(option + olen + 1, &value, &vlen); + if (vlen == 0) continue; + value[vlen] = 0; + + tsReadLogOption(option, value); + } + + tfree(line); + fclose(fp); +} + +bool tsReadGlobalConfig() { + tsInitGlobalConfig(); + + FILE * fp; + char * line, *option, *value, *value1; + size_t len; + int olen, vlen, vlen1; + char fileName[128]; + + sprintf(fileName, "%s/taos.cfg", configDir); + fp = fopen(fileName, "r"); + if (fp == NULL) { + // printf("option file:%s not there, all options are set to system default\n", fileName); + // return; + } else { + line = NULL; + while (!feof(fp)) { + tfree(line); + line = option = value = NULL; + len = olen = vlen = 0; + + getline(&line, &len, fp); + if (line == NULL) break; + + paGetToken(line, &option, &olen); + if (olen == 0) continue; + option[olen] = 0; + + paGetToken(option + olen + 1, &value, &vlen); + if (vlen == 0) continue; + value[vlen] = 0; + + // For dataDir, the format is: + // dataDir /mnt/disk1 0 + paGetToken(value + vlen + 1, &value1, &vlen1); + + tsReadConfigOption(option, value); + } + + tfree(line); + fclose(fp); + } + + if (tsInternalIp[0] == 0) { + taosGetPrivateIp(tsInternalIp); + } + + if (tsLocalIp[0] == 0) { + strcpy(tsLocalIp, tsInternalIp); + } + + taosGetSystemInfo(); + + tsSetLocale(); + + SGlobalConfig *cfg_timezone = tsGetConfigOption("timezone"); + if (cfg_timezone && cfg_timezone->cfgStatus == TSDB_CFG_CSTATUS_FILE) { + tsSetTimeZone(); + } + + if (tsNumOfCores <= 0) { + tsNumOfCores = 1; + } + + if (tscEmbedded) { + strcpy(tsLocalIp, tsInternalIp); + } + + tsVersion = 0; + for (int i = 0; i < 10; i++) { + if (version[i] >= '0' && version[i] <= '9') { + tsVersion = tsVersion * 10 + (version[i] - '0'); + } else if (version[i] == 0) { + break; + } + } + tsVersion = 10 * tsVersion; + + return true; +} + +int tsCfgDynamicOptions(char *msg) { + char *option, *value; + int olen, vlen, code = 0; + int vint = 0; + + paGetToken(msg, &option, &olen); + if (olen == 0) return 0; + + paGetToken(option + olen + 1, &value, &vlen); + if (vlen == 0) + vint = 135; + else { + vint = atoi(value); + } + + pPrint("change dynamic option: %s, value: %d", option, vint); + + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalConfig *cfg = tsGlobalConfig + i; + if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; + if (cfg->valType != TSDB_CFG_VTYPE_INT) continue; + if (strncasecmp(option, cfg->option, olen) != 0) continue; + *((int *)cfg->ptr) = vint; + + if (strncasecmp(cfg->option, "debugFlag", olen) == 0) tsSetAllDebugFlag(); + return code; + } + + if (strncasecmp(option, "resetlog", 8) == 0) { + taosResetLogFile(); + tsPrintGlobalConfig(); + } + if (strncasecmp(option, "resetQueryCache", 15) == 0) { + if (taosLogSqlFp) { + pPrint("the query cache of internal client will reset"); + taosLogSqlFp("reset query cache"); + } else { + pError("reset query cache can't be executed, for monitor not initialized"); + code = 169; + } + } else { + code = 169; // INVALID_OPTION + } + + return code; +} + +void tsPrintGlobalConfig() { + pPrint(" taos config & system info:"); + pPrint("=================================="); + + for (int i = 0; i < tsGlobalConfigNum; ++i) 
{ + SGlobalConfig *cfg = tsGlobalConfig + i; + if (tscEmbedded == 0 && !(cfg->cfgType & TSDB_CFG_CTYPE_B_CLIENT)) continue; + + int optionLen = (int)strlen(cfg->option); + int blankLen = TSDB_CFG_PRINT_LEN - optionLen; + blankLen = blankLen < 0 ? 0 : blankLen; + + char blank[TSDB_CFG_PRINT_LEN]; + memset(blank, ' ', TSDB_CFG_PRINT_LEN); + blank[blankLen] = 0; + + switch (cfg->valType) { + case TSDB_CFG_VTYPE_SHORT: + pPrint(" %s:%s%d%s", cfg->option, blank, *((int16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TSDB_CFG_VTYPE_INT: + pPrint(" %s:%s%d%s", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TSDB_CFG_VTYPE_UINT: + pPrint(" %s:%s%d%s", cfg->option, blank, *((uint32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TSDB_CFG_VTYPE_FLOAT: + pPrint(" %s:%s%f%s", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TSDB_CFG_VTYPE_STRING: + case TSDB_CFG_VTYPE_IPSTR: + case TSDB_CFG_VTYPE_DIRECTORY: + pPrint(" %s:%s%s%s", cfg->option, blank, (char *)cfg->ptr, tsGlobalUnit[cfg->unitType]); + break; + default: + break; + } + } + + tsPrintOsInfo(); + + pPrint("=================================="); +} + +void tsSetAllDebugFlag() { + if (mdebugFlag != debugFlag) mdebugFlag = debugFlag; + if (ddebugFlag != debugFlag) ddebugFlag = debugFlag; + if (sdbDebugFlag != debugFlag) sdbDebugFlag = debugFlag; + if (taosDebugFlag != debugFlag) taosDebugFlag = debugFlag; + if (cdebugFlag != debugFlag) cdebugFlag = debugFlag; + if (jnidebugFlag != debugFlag) jnidebugFlag = debugFlag; + if (uDebugFlag != debugFlag) uDebugFlag = debugFlag; + if (httpDebugFlag != debugFlag) httpDebugFlag = debugFlag; + if (monitorDebugFlag != debugFlag) monitorDebugFlag = debugFlag; + if (odbcdebugFlag != debugFlag) odbcdebugFlag = debugFlag; + pPrint("all debug flag are set to %d", debugFlag); +} + +void tsSetLocale() { + char msgLocale[] = "Invalid locale:%s, please set the valid locale in config file"; + char msgCharset[] = "Invalid charset:%s, please set the valid charset in config file"; + char msgCharset1[] = "failed to get charset, please set the valid charset in config file"; + + char *locale = setlocale(LC_CTYPE, tsLocale); + + /* default locale or user specified locale is not valid, abort launch */ + if (locale == NULL) { + printf(msgLocale, tsLocale); + pPrint(msgLocale, tsLocale); + exit(-1); + } + + if (strlen(tsCharset) == 0) { + printf("%s\n", msgCharset1); + pPrint(msgCharset1); + exit(-1); + } + + if (!taosValidateEncodec(tsCharset)) { + printf(msgCharset, tsCharset); + pPrint(msgCharset, tsCharset); + exit(-1); + } +} + +void tsSetTimeZone() { + SGlobalConfig *cfg_timezone = tsGetConfigOption("timezone"); + pPrint("timezone is set to %s by %s", tsTimezone, tsCfgStatusStr[cfg_timezone->cfgStatus]); + + setenv("TZ", tsTimezone, 1); + tzset(); + + /* + * get CURRENT time zone. + * system current time zone is affected by daylight saving time(DST) + * + * e.g., the local time zone of London in DST is GMT+01:00, + * otherwise is GMT+00:00 + */ + int32_t tz = (-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR; + tz += daylight; + + /* + * format: + * (CST, +0800) + * (BST, +0100) + */ + sprintf(tsTimezone, "(%s, %s%02d00)", tzname[daylight], tz >= 0 ? 
"+" : "-", abs(tz)); + + pPrint("timezone format changed to %s", tsTimezone); +} diff --git a/src/util/src/thash.c b/src/util/src/thash.c new file mode 100644 index 000000000000..828072b55649 --- /dev/null +++ b/src/util/src/thash.c @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "tmempool.h" + +typedef struct _long_hash_t { + unsigned int id; + struct _long_hash_t *prev; + struct _long_hash_t *next; + uint64_t cont; +} SLongHash; + +typedef struct { + SLongHash **longHashList; + mpool_h longHashMemPool; + int (*hashFp)(void *, uint64_t); + int maxSessions; + pthread_mutex_t mutex; +} SHashObj; + +uint64_t taosHashUInt64(uint64_t handle) { + uint64_t hash = handle >> 16; + hash += handle & 0xFFFF; + return hash; +} + +int taosHashLong(void *handle, uint64_t ip) { + SHashObj *pObj = (SHashObj *)handle; + int hash = 0; + + hash = (int)(ip >> 16); + hash += (int)(ip & 0xFFFF); + + hash = hash % pObj->maxSessions; + + return hash; +} + +int taosAddHash(void *handle, uint64_t cont, unsigned int id) { + int hash; + SLongHash *pNode; + SHashObj * pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return -1; + + pthread_mutex_lock(&pObj->mutex); + + hash = (*pObj->hashFp)(pObj, cont); + + pNode = (SLongHash *)taosMemPoolMalloc(pObj->longHashMemPool); + pNode->cont = cont; + pNode->id = id; + pNode->prev = 0; + pNode->next = pObj->longHashList[hash]; + + if (pObj->longHashList[hash] != 0) (pObj->longHashList[hash])->prev = pNode; + pObj->longHashList[hash] = pNode; + + pthread_mutex_unlock(&pObj->mutex); + + return 0; +} + +void taosDeleteHash(void *handle, uint64_t cont) { + int hash; + SLongHash *pNode; + SHashObj * pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + hash = (*pObj->hashFp)(pObj, cont); + + pthread_mutex_lock(&pObj->mutex); + + pNode = pObj->longHashList[hash]; + while (pNode) { + if (pNode->cont == cont) break; + + pNode = pNode->next; + } + + if (pNode) { + if (pNode->prev) { + pNode->prev->next = pNode->next; + } else { + pObj->longHashList[hash] = pNode->next; + } + + if (pNode->next) { + pNode->next->prev = pNode->prev; + } + + taosMemPoolFree(pObj->longHashMemPool, (char *)pNode); + } + + pthread_mutex_unlock(&pObj->mutex); +} + +int32_t taosGetIdFromHash(void *handle, uint64_t cont) { + int hash; + SLongHash *pNode; + SHashObj * pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return -1; + + hash = (*pObj->hashFp)(pObj, cont); + + pthread_mutex_lock(&pObj->mutex); + + pNode = pObj->longHashList[hash]; + + while (pNode) { + if (pNode->cont == cont) { + break; + } + + pNode = pNode->next; + } + + pthread_mutex_unlock(&pObj->mutex); + + if (pNode) return (int32_t)pNode->id; + + return -1; +} + +void *taosOpenHash(int maxSessions, int (*fp)(void *, uint64_t)) { + SLongHash **longHashList; + mpool_h longHashMemPool; + SHashObj * 
pObj; + + longHashMemPool = taosMemPoolInit(maxSessions, sizeof(SLongHash)); + if (longHashMemPool == 0) return NULL; + + longHashList = calloc(sizeof(SLongHash *), (size_t)maxSessions); + if (longHashList == 0) { + taosMemPoolCleanUp(longHashMemPool); + return NULL; + } + + pObj = malloc(sizeof(SHashObj)); + if (pObj == NULL) { + taosMemPoolCleanUp(longHashMemPool); + free(longHashList); + return NULL; + } + + pObj->maxSessions = maxSessions; + pObj->longHashMemPool = longHashMemPool; + pObj->longHashList = longHashList; + pObj->hashFp = fp; + + pthread_mutex_init(&pObj->mutex, NULL); + + return pObj; +} + +void taosCloseHash(void *handle) { + SHashObj *pObj; + + pObj = (SHashObj *)handle; + if (pObj == NULL || pObj->maxSessions == 0) return; + + pthread_mutex_lock(&pObj->mutex); + + if (pObj->longHashMemPool) taosMemPoolCleanUp(pObj->longHashMemPool); + + if (pObj->longHashList) free(pObj->longHashList); + + pthread_mutex_unlock(&pObj->mutex); + + pthread_mutex_destroy(&pObj->mutex); + + memset(pObj, 0, sizeof(SHashObj)); + free(pObj); +} diff --git a/src/util/src/thashutil.c b/src/util/src/thashutil.c new file mode 100644 index 000000000000..b6b3ea682ef9 --- /dev/null +++ b/src/util/src/thashutil.c @@ -0,0 +1,77 @@ +/** + * MurmurHash3 by Austin Appleby + * @ref + * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp + * + * Plese refers to the link above for the complete implementation of + * MurmurHash algorithm + * + */ +#include "tutil.h" + +#define ROTL32(x, r) ((x) << (r) | (x) >> (32 - (r))) + +#define FMIX32(h) \ + do { \ + (h) ^= (h) >> 16; \ + (h) *= 0x85ebca6b; \ + (h) ^= (h) >> 13; \ + (h) *= 0xc2b2ae35; \ + (h) ^= (h) >> 16; \ + } while (0) + +static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out) { + const uint8_t *data = (const uint8_t *)key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4); + + for (int i = -nblocks; i; i++) { + uint32_t k1 = blocks[i]; + + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ROTL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; + } + + const uint8_t *tail = (data + nblocks * 4); + + uint32_t k1 = 0; + + switch (len & 3) { + case 3: + k1 ^= tail[2] << 16; + case 2: + k1 ^= tail[1] << 8; + case 1: + k1 ^= tail[0]; + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + h1 ^= k1; + }; + + h1 ^= len; + + FMIX32(h1); + + *(uint32_t *)out = h1; +} + +uint32_t MurmurHash3_32(const void *key, int len) { + const int32_t hashSeed = 0x12345678; + + uint32_t val = 0; + MurmurHash3_32_s(key, len, hashSeed, &val); + + return val; +} diff --git a/src/util/src/thistogram.c b/src/util/src/thistogram.c new file mode 100644 index 000000000000..f8294df4dd8c --- /dev/null +++ b/src/util/src/thistogram.c @@ -0,0 +1,668 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "taosmsg.h" +#include "thistogram.h" +#include "tlosertree.h" +#include "tsdb.h" + +/** + * + * implement the histogram and percentile_approx based on the paper: + * Yael Ben-Haim, Elad Tom-Tov. A Streaming Parallel Decision Tree Algorithm, + * The Journal of Machine Learning Research.Volume 11, 3/1/2010 pp.849-872 + * https://dl.acm.org/citation.cfm?id=1756034 + * + * @data 2018-12-14 + * @version 0.1 + * + */ + +// SHeapEntry* tHeapCreate(int32_t numOfEntries) { +// SHeapEntry* pEntry = calloc(1, sizeof(SHeapEntry)*(numOfEntries + 1)); +// return pEntry; +//} +// +// int32_t tHeapPut(SHeapEntry* pEntry, int32_t maxSize, int32_t num, void* +// pData, double v) { +// pEntry[num].val = v; +// pEntry[num].pData = pData; +// +// return num; +//} +// +////min heap +// void tHeapAdjust(SHeapEntry* pEntry, int32_t index, int32_t len) { +// SHeapEntry* ptr = NULL; +// +// int32_t end = len - 1; +// +// SHeapEntry p1 = pEntry[index]; +// int32_t next = index; +// +// for(int32_t i=index; i<=(end-1)/2; ) { +// int32_t lc = (i<<1) + 1; +// int32_t rc = (i+1) << 1; +// +// ptr = &pEntry[lc]; +// next = lc; +// +// if (rc < len && (pEntry[lc].val > pEntry[rc].val)) { +// ptr = &pEntry[rc]; +// next = rc; +// } +// +// if (p1.val < ptr->val) { +// next = i; +// break; +// } +// pEntry[i] = *ptr; +// tSkipListNode* pnode = (tSkipListNode*) pEntry[i].pData; +// if(pnode != NULL) { +// ((SHistBin*) pnode->pData)->index = i; +// } +// +// i = next; +// } +// +// pEntry[next] = p1; +// +// tSkipListNode* pnode = (tSkipListNode*) p1.pData; +// if (pnode != NULL) { +// ((SHistBin*) pnode->pData)->index = next; +// } +//} +// +// void tHeapSort(SHeapEntry* pEntry, int32_t len) { +// int32_t last = len/2 - 1; +// +// for(int32_t i=last; i >= 0; --i) { +// tHeapAdjust(pEntry, i, len); +// } +//} + +// typedef struct SInsertSupporter { +// int32_t numOfEntries; +// tSkipList* pSkipList; +// SLoserTreeInfo* pTree; +//} SInsertSupporter; +// +// int32_t compare(const void* pleft, const void* pright, void* param) { +// SLoserTreeNode* left = (SLoserTreeNode*) pleft; +// SLoserTreeNode* right = (SLoserTreeNode *)pright; +// +// SInsertSupporter* pss = (SInsertSupporter*) param; +// +// tSkipListNode* pLeftNode = (tSkipListNode*) left->pData; +// tSkipListNode* pRightNode = (tSkipListNode*) right->pData; +// +// SHistBin* pLeftBin = (SHistBin*)pLeftNode->pData; +// SHistBin* pRightBin = (SHistBin*)pRightNode->pData; +// +// if (pLeftBin->delta == pRightBin->delta) { +// return 0; +// } else { +// return ((pLeftBin->delta < pRightBin->delta)? 
-1:1); +// } +//} + +static int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val); + +SHistogramInfo* tHistogramCreate(int32_t numOfEntries) { + /* need one redundant slot */ + SHistogramInfo* pHisto = malloc(sizeof(SHistogramInfo) + sizeof(SHistBin) * (numOfEntries + 1)); + +#if !defined(USE_ARRAYLIST) + tSkipListCreate(&pHisto->pList, MAX_SKIP_LIST_LEVEL, TSDB_DATA_TYPE_DOUBLE, sizeof(double), NULL); + SInsertSupporter* pss = malloc(sizeof(SInsertSupporter)); + pss->numOfEntries = pHisto->maxEntries; + pss->pSkipList = pHisto->pList; + + int32_t ret = tLoserTreeCreate1(&pHisto->pLoserTree, numOfEntries, pss, compare); + pss->pTree = pHisto->pLoserTree; +#endif + + return tHistogramCreateFrom(pHisto, numOfEntries); +} + +SHistogramInfo* tHistogramCreateFrom(void* pBuf, int32_t numOfBins) { + memset(pBuf, 0, sizeof(SHistogramInfo) + sizeof(SHistBin) * (numOfBins + 1)); + + SHistogramInfo* pHisto = (SHistogramInfo*)pBuf; + pHisto->elems = (SHistBin*)((char*)pBuf + sizeof(SHistogramInfo)); + + pHisto->maxEntries = numOfBins; + + pHisto->min = DBL_MAX; + pHisto->max = -DBL_MAX; + + return pBuf; +} + +int32_t tHistogramAdd(SHistogramInfo** pHisto, double val) { + if (*pHisto == NULL) { + *pHisto = tHistogramCreate(MAX_HISTOGRAM_BIN); + } + +#if defined(USE_ARRAYLIST) + int32_t idx = vnodeHistobinarySearch((*pHisto)->elems, (*pHisto)->numOfEntries, val); + assert(idx >= 0 && idx <= (*pHisto)->maxEntries); + + if ((*pHisto)->elems[idx].val == val && idx >= 0) { + (*pHisto)->elems[idx].num += 1; + + if ((*pHisto)->numOfEntries == 0) { + (*pHisto)->numOfEntries += 1; + } + } else { /* insert a new slot */ + if ((*pHisto)->numOfElems > 1 && idx < (*pHisto)->numOfEntries) { + if (idx > 0) { + assert((*pHisto)->elems[idx - 1].val <= val); + } + + assert((*pHisto)->elems[idx].val > val); + } else { + assert((*pHisto)->elems[(*pHisto)->numOfEntries].val < val); + } + + histogramCreateBin(*pHisto, idx, val); + } +#else + tSkipListKey key = tSkipListCreateKey(TSDB_DATA_TYPE_DOUBLE, &val, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + SHistBin* entry = calloc(1, sizeof(SHistBin)); + entry->val = val; + + tSkipListNode* pResNode = tSkipListPut((*pHisto)->pList, entry, &key, 0); + + SHistBin* pEntry1 = (SHistBin*)pResNode->pData; + pEntry1->index = -1; + + tSkipListNode* pLast = NULL; + + if (pEntry1->num == 0) { /* it is a new node */ + (*pHisto)->numOfEntries += 1; + pEntry1->num += 1; + + /* number of entries reaches the upper limitation */ + if (pResNode->pForward[0] != NULL) { + /* we need to update the last updated slot in loser tree*/ + pEntry1->delta = ((SHistBin*)pResNode->pForward[0]->pData)->val - val; + + if ((*pHisto)->ordered) { + int32_t lastIndex = (*pHisto)->maxIndex; + SLoserTreeInfo* pTree = (*pHisto)->pLoserTree; + + (*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].pData = pResNode; + pEntry1->index = (*pHisto)->pLoserTree->pNode[lastIndex + pTree->numOfEntries].index; + + // update the loser tree + if ((*pHisto)->ordered) { + tLoserTreeAdjust(pTree, pEntry1->index + pTree->numOfEntries); + } + + tSkipListKey kx = + tSkipListCreateKey(TSDB_DATA_TYPE_DOUBLE, &(*pHisto)->max, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize); + pLast = tSkipListGetOne((*pHisto)->pList, &kx); + } + } else { + /* this node located at the last position of the skiplist, we do not + * update the loser-tree */ + pEntry1->delta = DBL_MAX; + pLast = pResNode; + } + + if (pResNode->pBackward[0] != &(*pHisto)->pList->pHead) { + SHistBin* pPrevEntry = 
(SHistBin*)pResNode->pBackward[0]->pData; + pPrevEntry->delta = val - pPrevEntry->val; + + SLoserTreeInfo* pTree = (*pHisto)->pLoserTree; + if ((*pHisto)->ordered) { + tLoserTreeAdjust(pTree, pPrevEntry->index + pTree->numOfEntries); + tLoserTreeDisplay(pTree); + } + } + + if ((*pHisto)->numOfEntries >= (*pHisto)->maxEntries + 1) { + // set the right value for loser-tree + assert((*pHisto)->pLoserTree != NULL); + if (!(*pHisto)->ordered) { + tSkipListPrint((*pHisto)->pList, 1); + + SLoserTreeInfo* pTree = (*pHisto)->pLoserTree; + tSkipListNode* pHead = (*pHisto)->pList->pHead.pForward[0]; + + tSkipListNode* p1 = pHead; + + printf("\n"); + while (p1 != NULL) { + printf("%f\t", ((SHistBin*)(p1->pData))->delta); + p1 = p1->pForward[0]; + } + printf("\n"); + + /* last one in skiplist is ignored */ + for (int32_t i = pTree->numOfEntries; i < pTree->totalEntries; ++i) { + pTree->pNode[i].pData = pHead; + pTree->pNode[i].index = i - pTree->numOfEntries; + SHistBin* pBin = (SHistBin*)pHead->pData; + pBin->index = pTree->pNode[i].index; + + pHead = pHead->pForward[0]; + } + + pLast = pHead; + + for (int32_t i = 0; i < pTree->numOfEntries; ++i) { + pTree->pNode[i].index = -1; + } + + tLoserTreeDisplay(pTree); + + for (int32_t i = pTree->totalEntries - 1; i >= pTree->numOfEntries; i--) { + tLoserTreeAdjust(pTree, i); + } + + tLoserTreeDisplay(pTree); + (*pHisto)->ordered = true; + } + + printf("delta is:%lf\n", pEntry1->delta); + + tSkipListPrint((*pHisto)->pList, 1); + + /* the chosen node */ + tSkipListNode* pNode = (*pHisto)->pLoserTree->pNode[0].pData; + SHistBin* pEntry = (SHistBin*)pNode->pData; + + tSkipListNode* pNext = pNode->pForward[0]; + SHistBin* pNextEntry = (SHistBin*)pNext->pData; + assert(pNextEntry->val - pEntry->val == pEntry->delta); + + double newVal = (pEntry->val * pEntry->num + pNextEntry->val * pNextEntry->num) / (pEntry->num + pNextEntry->num); + pEntry->val = newVal; + pNode->key.dKey = newVal; + pEntry->num = pEntry->num + pNextEntry->num; + + // update delta value in current node + pEntry->delta = (pNextEntry->delta + pNextEntry->val) - pEntry->val; + + // reset delta value in the previous node + SHistBin* pPrevEntry = (SHistBin*)pNode->pBackward[0]->pData; + if (pPrevEntry) { + pPrevEntry->delta = pEntry->val - pPrevEntry->val; + } + + SLoserTreeInfo* pTree = (*pHisto)->pLoserTree; + if (pNextEntry->index != -1) { + (*pHisto)->maxIndex = pNextEntry->index; + + // set the last element in skiplist, of which delta is FLT_MAX; + pTree->pNode[pNextEntry->index + pTree->numOfEntries].pData = pLast; + ((SHistBin*)pLast->pData)->index = pNextEntry->index; + int32_t f = pTree->pNode[pNextEntry->index + pTree->numOfEntries].index; + printf("disappear index is:%d\n", f); + } + + tLoserTreeAdjust(pTree, pEntry->index + pTree->numOfEntries); + // remove the next node in skiplist + tSkipListRemoveNode((*pHisto)->pList, pNext); + tSkipListPrint((*pHisto)->pList, 1); + + tLoserTreeDisplay((*pHisto)->pLoserTree); + } else { // add to heap + if (pResNode->pForward[0] != NULL) { + pEntry1->delta = ((SHistBin*)pResNode->pForward[0]->pData)->val - val; + } else { + pEntry1->delta = DBL_MAX; + } + + if (pResNode->pBackward[0] != &(*pHisto)->pList->pHead) { + SHistBin* pPrevEntry = (SHistBin*)pResNode->pBackward[0]->pData; + pEntry1->delta = val - pPrevEntry->val; + } + + printf("delta is:%9lf\n", pEntry1->delta); + } + + } else { + SHistBin* pEntry = (SHistBin*)pResNode->pData; + assert(pEntry->val == val); + pEntry->num += 1; + } + +#endif + if (val > (*pHisto)->max) { + (*pHisto)->max = 
val; + } + + if (val < (*pHisto)->min) { + (*pHisto)->min = val; + } + + (*pHisto)->numOfElems += 1; + return 0; +} + +int32_t vnodeHistobinarySearch(SHistBin* pEntry, int32_t len, double val) { + int32_t end = len - 1; + int32_t start = 0; + + while (start <= end) { + int32_t mid = (end - start) / 2 + start; + if (pEntry[mid].val == val) { + return mid; + } + + if (pEntry[mid].val < val) { + start = mid + 1; + } else { + end = mid - 1; + } + } + + int32_t ret = start > end ? start : end; + if (ret < 0) { + return 0; + } else { + return ret; + } +} + +static void histogramMergeImpl(SHistBin* pHistBin, int32_t* size) { +#if defined(USE_ARRAYLIST) + int32_t oldSize = *size; + + double delta = DBL_MAX; + int32_t index = -1; + for (int32_t i = 1; i < oldSize; ++i) { + double d = pHistBin[i].val - pHistBin[i - 1].val; + if (d < delta) { + delta = d; + index = i - 1; + } + } + + SHistBin* s1 = &pHistBin[index]; + SHistBin* s2 = &pHistBin[index + 1]; + + double newVal = (s1->val * s1->num + s2->val * s2->num) / (s1->num + s2->num); + s1->val = newVal; + s1->num = s1->num + s2->num; + + memmove(&pHistBin[index + 1], &pHistBin[index + 2], (oldSize - index - 2) * sizeof(SHistBin)); + (*size) -= 1; +#endif +} + +/* optimize this procedure */ +int32_t histogramCreateBin(SHistogramInfo* pHisto, int32_t index, double val) { +#if defined(USE_ARRAYLIST) + int32_t remain = pHisto->numOfEntries - index; + if (remain > 0) { + memmove(&pHisto->elems[index + 1], &pHisto->elems[index], sizeof(SHistBin) * remain); + } + + assert(index >= 0 && index <= pHisto->maxEntries); + + pHisto->elems[index].num = 1; + pHisto->elems[index].val = val; + pHisto->numOfEntries += 1; + + /* we need to merge the slot */ + if (pHisto->numOfEntries == pHisto->maxEntries + 1) { + histogramMergeImpl(pHisto->elems, &pHisto->numOfEntries); + + pHisto->elems[pHisto->maxEntries].val = 0; + pHisto->elems[pHisto->maxEntries].num = 0; + } +#endif + assert(pHisto->numOfEntries <= pHisto->maxEntries); + return 0; +} + +void tHistogramDestroy(SHistogramInfo** pHisto) { + if (*pHisto == NULL) { + return; + } + + free(*pHisto); + *pHisto = NULL; +} + +void tHistogramPrint(SHistogramInfo* pHisto) { + printf("total entries: %d, elements: %d\n", pHisto->numOfEntries, pHisto->numOfElems); +#if defined(USE_ARRAYLIST) + for (int32_t i = 0; i < pHisto->numOfEntries; ++i) { + printf("%d: (%f, %ld)\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num); + } +#else + tSkipListNode* pNode = pHisto->pList->pHead.pForward[0]; + + for (int32_t i = 0; i < pHisto->numOfEntries; ++i) { + SHistBin* pEntry = (SHistBin*)pNode->pData; + printf("%d: (%f, %lld)\n", i + 1, pEntry->val, pEntry->num); + pNode = pNode->pForward[0]; + } +#endif +} + +/** + * Estimated number of points in the interval (−inf,b]. 
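+ * The estimate follows the "sum" procedure of the Ben-Haim & Tom-Tov paper
+ * cited near the top of this file: every bin strictly below the bracketing
+ * bin j contributes its full count, bin j contributes half of its count, and
+ * the remainder is the trapezoid between bin j and bin j+1 truncated at v:
+ *
+ *   m_b  = m_j + (m_{j+1} - m_j) * (v - v_j) / (v_{j+1} - v_j)
+ *   sum ~= sum_{i<j} m_i + m_j / 2 + (m_j + m_b) * (v - v_j) / (2 * (v_{j+1} - v_j))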
+ * @param pHisto + * @param v + */ +int64_t tHistogramSum(SHistogramInfo* pHisto, double v) { +#if defined(USE_ARRAYLIST) + int32_t slotIdx = vnodeHistobinarySearch(pHisto->elems, pHisto->numOfEntries, v); + if (pHisto->elems[slotIdx].val != v) { + slotIdx -= 1; + + if (slotIdx < 0) { + slotIdx = 0; + assert(v <= pHisto->elems[slotIdx].val); + } else { + assert(v >= pHisto->elems[slotIdx].val); + + if (slotIdx + 1 < pHisto->numOfEntries) { + assert(v < pHisto->elems[slotIdx + 1].val); + } + } + } + + double m1 = pHisto->elems[slotIdx].num; + double v1 = pHisto->elems[slotIdx].val; + + double m2 = pHisto->elems[slotIdx + 1].num; + double v2 = pHisto->elems[slotIdx + 1].val; + + double estNum = m1 + (m2 - m1) * (v - v1) / (v2 - v1); + double s1 = (m1 + estNum) * (v - v1) / (2 * (v2 - v1)); + + for (int32_t i = 0; i < slotIdx; ++i) { + s1 += pHisto->elems[i].num; + } + + s1 = s1 + m1 / 2; + + return (int64_t)s1; +#endif +} + +double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num) { +#if defined(USE_ARRAYLIST) + double* pVal = malloc(num * sizeof(double)); + + for (int32_t i = 0; i < num; ++i) { + double numOfElem = (ratio[i] / 100) * pHisto->numOfElems; + + if (numOfElem == 0) { + pVal[i] = pHisto->min; + continue; + } else if (numOfElem <= pHisto->elems[0].num) { + pVal[i] = pHisto->elems[0].val; + continue; + } else if (numOfElem == pHisto->numOfElems) { + pVal[i] = pHisto->max; + continue; + } + + int32_t j = 0; + int64_t total = 0; + + while (j < pHisto->numOfEntries) { + total += pHisto->elems[j].num; + if (total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem) { + break; + } + + j += 1; + } + + assert(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem); + + double delta = numOfElem - total; + if (fabs(delta) < FLT_EPSILON) { + pVal[i] = pHisto->elems[j].val; + } + + double start = pHisto->elems[j].num; + double range = pHisto->elems[j + 1].num - start; + + if (range == 0) { + pVal[i] = (pHisto->elems[j + 1].val - pHisto->elems[j].val) * delta / start + pHisto->elems[j].val; + } else { + double factor = (-2 * start + sqrt(4 * start * start - 4 * range * (-2 * delta))) / (2 * range); + pVal[i] = pHisto->elems[j].val + (pHisto->elems[j + 1].val - pHisto->elems[j].val) * factor; + } + } +#else + double* pVal = malloc(num * sizeof(double)); + + for (int32_t i = 0; i < num; ++i) { + double numOfElem = ratio[i] * pHisto->numOfElems; + + tSkipListNode* pFirst = pHisto->pList->pHead.pForward[0]; + SHistBin* pEntry = (SHistBin*)pFirst->pData; + if (numOfElem == 0) { + pVal[i] = pHisto->min; + printf("i/numofSlot: %f, v:%f, %f\n", ratio[i], numOfElem, pVal[i]); + continue; + } else if (numOfElem <= pEntry->num) { + pVal[i] = pEntry->val; + printf("i/numofSlot: %f, v:%f, %f\n", ratio[i], numOfElem, pVal[i]); + continue; + } else if (numOfElem == pHisto->numOfElems) { + pVal[i] = pHisto->max; + printf("i/numofSlot: %f, v:%f, %f\n", ratio[i], numOfElem, pVal[i]); + continue; + } + + int32_t j = 0; + int64_t total = 0; + SHistBin* pPrev = pEntry; + + while (j < pHisto->numOfEntries) { + if (total <= numOfElem && total + pEntry->num > numOfElem) { + break; + } + + total += pEntry->num; + pPrev = pEntry; + + pFirst = pFirst->pForward[0]; + pEntry = (SHistBin*)pFirst->pData; + + j += 1; + } + + assert(total <= numOfElem && total + pEntry->num > numOfElem); + + double delta = numOfElem - total; + if (fabs(delta) < FLT_EPSILON) { + // printf("i/numofSlot: %f, v:%f, %f\n", + // (double)i/numOfSlots, numOfElem, pHisto->elems[j].val); + pVal[i] = 
pPrev->val; + } + + double start = pPrev->num; + double range = pEntry->num - start; + + if (range == 0) { + pVal[i] = (pEntry->val - pPrev->val) * delta / start + pPrev->val; + } else { + double factor = (-2 * start + sqrt(4 * start * start - 4 * range * (-2 * delta))) / (2 * range); + pVal[i] = pPrev->val + (pEntry->val - pPrev->val) * factor; + } + // printf("i/numofSlot: %f, v:%f, %f\n", (double)i/numOfSlots, + // numOfElem, val); + } +#endif + return pVal; +} + +SHistogramInfo* tHistogramMerge(SHistogramInfo* pHisto1, SHistogramInfo* pHisto2, int32_t numOfEntries) { + SHistogramInfo* pResHistogram = tHistogramCreate(numOfEntries); + + SHistBin* pHistoBins = calloc(1, sizeof(SHistBin) * (pHisto1->numOfEntries + pHisto2->numOfEntries)); + + int32_t i = 0; + int32_t j = 0; + int32_t k = 0; + while (i < pHisto1->numOfEntries && j < pHisto2->numOfEntries) { + if (pHisto1->elems[i].val < pHisto2->elems[j].val) { + pHistoBins[k++] = pHisto1->elems[i++]; + } else if (pHisto1->elems[i].val > pHisto2->elems[j].val) { + pHistoBins[k++] = pHisto2->elems[j++]; + } else { + pHistoBins[k] = pHisto1->elems[i++]; + pHistoBins[k++].num += pHisto2->elems[j++].num; + } + } + + if (i < pHisto1->numOfEntries) { + int32_t remain = pHisto1->numOfEntries - i; + memcpy(&pHistoBins[k], &pHisto1->elems[i], sizeof(SHistBin) * remain); + k += remain; + } + + if (j < pHisto2->numOfEntries) { + int32_t remain = pHisto2->numOfEntries - j; + memcpy(&pHistoBins[k], &pHisto2->elems[j], sizeof(SHistBin) * remain); + k += remain; + } + + /* update other information */ + pResHistogram->numOfElems = pHisto1->numOfElems + pHisto2->numOfElems; + pResHistogram->min = (pHisto1->min < pHisto2->min) ? pHisto1->min : pHisto2->min; + pResHistogram->max = (pHisto1->max > pHisto2->max) ? pHisto1->max : pHisto2->max; + + while (k > numOfEntries) { + histogramMergeImpl(pHistoBins, &k); + } + + memcpy(pResHistogram->elems, pHistoBins, sizeof(SHistBin) * numOfEntries); + pResHistogram->numOfEntries = k; + + free(pHistoBins); + return pResHistogram; +} \ No newline at end of file diff --git a/src/util/src/tidpool.c b/src/util/src/tidpool.c new file mode 100644 index 000000000000..cf999974a801 --- /dev/null +++ b/src/util/src/tidpool.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include "tlog.h" + +typedef struct { + int maxId; + int numOfFree; + int freeSlot; + int * freeList; + pthread_mutex_t mutex; +} id_pool_t; + +void *taosInitIdPool(int maxId) { + id_pool_t *pIdPool; + int * idList, i; + + if (maxId < 3) maxId = 3; + + pIdPool = (id_pool_t *)malloc(sizeof(id_pool_t)); + if (pIdPool == NULL) return NULL; + + idList = (int *)malloc(sizeof(int) * (size_t)maxId); + if (idList == NULL) { + free(pIdPool); + return NULL; + } + + memset(pIdPool, 0, sizeof(id_pool_t)); + pIdPool->maxId = maxId; + pIdPool->numOfFree = maxId - 1; + pIdPool->freeSlot = 0; + pIdPool->freeList = idList; + + pthread_mutex_init(&pIdPool->mutex, NULL); + + for (i = 1; i < maxId; ++i) idList[i - 1] = i; + + pTrace("pool:%p is setup, maxId:%d", pIdPool, pIdPool->maxId); + + return (void *)pIdPool; +} + +int taosAllocateId(void *handle) { + id_pool_t *pIdPool; + int id = -1; + if (handle == NULL) return id; + + pIdPool = (id_pool_t *)handle; + + if (pIdPool->maxId < 3) pError("pool:%p is messed up, maxId:%d", pIdPool, pIdPool->maxId); + + if (pthread_mutex_lock(&pIdPool->mutex) != 0) perror("lock pIdPool Mutex"); + + if (pIdPool->numOfFree > 0) { + id = pIdPool->freeList[pIdPool->freeSlot]; + pIdPool->freeSlot = (pIdPool->freeSlot + 1) % pIdPool->maxId; + pIdPool->numOfFree--; + } + + if (pthread_mutex_unlock(&pIdPool->mutex) != 0) perror("unlock pIdPool Mutex"); + + return id; +} + +void taosFreeId(void *handle, int id) { + id_pool_t *pIdPool; + int slot; + + pIdPool = (id_pool_t *)handle; + if (pIdPool->freeList == NULL || pIdPool->maxId == 0) return; + if (id <= 0 || id >= pIdPool->maxId) return; + if (pthread_mutex_lock(&pIdPool->mutex) != 0) perror("lock pIdPool Mutex"); + + slot = (pIdPool->freeSlot + pIdPool->numOfFree) % pIdPool->maxId; + pIdPool->freeList[slot] = id; + pIdPool->numOfFree++; + + if (pthread_mutex_unlock(&pIdPool->mutex) != 0) perror("unlock pIdPool Mutex"); +} + +void taosIdPoolCleanUp(void *handle) { + id_pool_t *pIdPool; + + if (handle == NULL) return; + pIdPool = (id_pool_t *)handle; + + pTrace("pool:%p is cleaned", pIdPool); + + if (pIdPool->freeList) free(pIdPool->freeList); + + pthread_mutex_destroy(&pIdPool->mutex); + + memset(pIdPool, 0, sizeof(id_pool_t)); + + free(pIdPool); +} + +int taosIdPoolNumOfUsed(void *handle) { + id_pool_t *pIdPool = (id_pool_t *)handle; + + return pIdPool->maxId - pIdPool->numOfFree - 1; +} + +void taosIdPoolReinit(void *handle) { + id_pool_t *pIdPool; + + pIdPool = (id_pool_t *)handle; + pIdPool->numOfFree = 0; + pIdPool->freeSlot = 0; + + for (int i = 0; i < pIdPool->maxId; ++i) pIdPool->freeList[i] = 0; +} + +void taosIdPoolMarkStatus(void *handle, int id, int status) { + id_pool_t *pIdPool = (id_pool_t *)handle; + + pIdPool->freeList[id] = status; +} + +void taosIdPoolSetFreeList(void *handle) { + id_pool_t *pIdPool; + int pos = 0; + + pIdPool = (id_pool_t *)handle; + pIdPool->numOfFree = 0; + pIdPool->freeSlot = 0; + + for (int i = 1; i < pIdPool->maxId; ++i) { + if (pIdPool->freeList[i] == 0) { + pIdPool->freeList[pos] = i; + pIdPool->numOfFree++; + pos++; + } + } +} diff --git a/src/util/src/tinterpolation.c b/src/util/src/tinterpolation.c new file mode 100644 index 000000000000..d036989410ac --- /dev/null +++ b/src/util/src/tinterpolation.c @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include + +#include "taosmsg.h" +#include "textbuffer.h" +#include "tinterpolation.h" +#include "tsqlfunction.h" +#include "ttypes.h" + +#define INTERPOL_IS_ASC_INTERPOL(interp) ((interp)->order == TSQL_SO_ASC) + +int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t timeRange, char intervalTimeUnit) { + if (timeRange == 0) { + return startTime; + } + + if (intervalTimeUnit == 'a' || intervalTimeUnit == 'm' || intervalTimeUnit == 's' || intervalTimeUnit == 'h') { + return (startTime / timeRange) * timeRange; + } else { + /* + * here we revised the start time of day according to the local time zone, + * but in case of DST, the start time of one day need to be dynamically decided. + * + * TODO dynmaically decide the start time of a day + */ + int64_t revStartime = (startTime / timeRange) * timeRange + timezone * MILLISECOND_PER_SECOND; + int64_t revEndtime = revStartime + timeRange - 1; + if (revEndtime < startTime) { + revStartime += timeRange; + } + + return revStartime; + } +} + +void taosInitInterpoInfo(SInterpolationInfo* pInterpoInfo, int32_t order, int64_t startTimestamp, + int32_t numOfGroupbyTags, int32_t rowSize) { + pInterpoInfo->startTimestamp = startTimestamp; + pInterpoInfo->rowIdx = -1; + pInterpoInfo->numOfRawDataInRows = 0; + pInterpoInfo->numOfCurrentInterpo = 0; + pInterpoInfo->numOfTotalInterpo = 0; + pInterpoInfo->order = order; + + pInterpoInfo->numOfTags = numOfGroupbyTags; + if (pInterpoInfo->pTags == NULL && numOfGroupbyTags > 0) { + pInterpoInfo->pTags = calloc(1, numOfGroupbyTags * POINTER_BYTES + rowSize); + } + + // set the previous value to be null + tfree(pInterpoInfo->prevValues); +} + +void taosInterpoSetStartInfo(SInterpolationInfo* pInterpoInfo, int32_t numOfRawDataInRows, int32_t type) { + if (type == TSDB_INTERPO_NONE) { + return; + } + + pInterpoInfo->rowIdx = INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 0 : numOfRawDataInRows - 1; + pInterpoInfo->numOfRawDataInRows = numOfRawDataInRows; +} + +TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int32_t timeInterval, int8_t intervalTimeUnit) { + if (order == TSQL_SO_ASC) { + return ekey; + } else { + return taosGetIntervalStartTimestamp(ekey, timeInterval, intervalTimeUnit); + } +} + +int32_t taosGetNumOfResultWithInterpo(SInterpolationInfo* pInterpoInfo, TSKEY* pPrimaryKeyArray, + int32_t numOfRawDataInRows, int64_t nInterval, int64_t ekey, + int32_t maxNumOfRows) { + int32_t numOfRes = taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeyArray, numOfRawDataInRows, nInterval, ekey); + return (numOfRes > maxNumOfRows) ? 
maxNumOfRows : numOfRes; +} + +int32_t taosGetNumOfResWithoutLimit(SInterpolationInfo* pInterpoInfo, int64_t* pPrimaryKeyArray, + int32_t numOfAvailRawData, int64_t nInterval, int64_t ekey) { + if (numOfAvailRawData > 0) { + int32_t finalNumOfResult = 0; + + if (pInterpoInfo->order == TSQL_SO_ASC) { + // get last timestamp, calculate the result size + int64_t lastKey = pPrimaryKeyArray[pInterpoInfo->numOfRawDataInRows - 1]; + finalNumOfResult = (int32_t)((lastKey - pInterpoInfo->startTimestamp) / nInterval) + 1; + } else { // todo error less than one!!! + TSKEY lastKey = pPrimaryKeyArray[0]; + finalNumOfResult = (int32_t)((pInterpoInfo->startTimestamp - lastKey) / nInterval) + 1; + } + + assert(finalNumOfResult >= numOfAvailRawData); + return finalNumOfResult; + } else { + /* reach the end of data */ + if ((ekey < pInterpoInfo->startTimestamp && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || + (ekey > pInterpoInfo->startTimestamp && !INTERPOL_IS_ASC_INTERPOL(pInterpoInfo))) { + return 0; + } else { + return (int32_t)(labs(ekey - pInterpoInfo->startTimestamp) / nInterval) + 1; + } + } +} + +bool taosHasNoneInterpoPoints(SInterpolationInfo* pInterpoInfo) { return taosNumOfRemainPoints(pInterpoInfo) > 0; } + +int32_t taosNumOfRemainPoints(SInterpolationInfo* pInterpoInfo) { + if (pInterpoInfo->rowIdx == -1 || pInterpoInfo->numOfRawDataInRows == 0) { + return 0; + } + + return INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? (pInterpoInfo->numOfRawDataInRows - pInterpoInfo->rowIdx) + : pInterpoInfo->rowIdx + 1; +} + +static double doLinearInterpolationImpl(double v1, double v2, double k1, double k2, double k) { + return v1 + (v2 - v1) * (k - k1) / (k2 - k1); +} + +int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoint* point) { + switch (type) { + case TSDB_DATA_TYPE_INT: { + *(int32_t*)point->val = doLinearInterpolationImpl(*(int32_t*)point1->val, *(int32_t*)point2->val, point1->key, + point2->key, point->key); + break; + } + case TSDB_DATA_TYPE_FLOAT: { + *(float*)point->val = + doLinearInterpolationImpl(*(float*)point1->val, *(float*)point2->val, point1->key, point2->key, point->key); + break; + }; + case TSDB_DATA_TYPE_DOUBLE: { + *(double*)point->val = + doLinearInterpolationImpl(*(double*)point1->val, *(double*)point2->val, point1->key, point2->key, point->key); + break; + }; + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: { + *(int64_t*)point->val = doLinearInterpolationImpl(*(int64_t*)point1->val, *(int64_t*)point2->val, point1->key, + point2->key, point->key); + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + *(int16_t*)point->val = doLinearInterpolationImpl(*(int16_t*)point1->val, *(int16_t*)point2->val, point1->key, + point2->key, point->key); + break; + }; + case TSDB_DATA_TYPE_TINYINT: { + *(int8_t*)point->val = + doLinearInterpolationImpl(*(int8_t*)point1->val, *(int8_t*)point2->val, point1->key, point2->key, point->key); + break; + }; + default: { + // TODO: Deal with interpolation with bool and strings and timestamp + return -1; + } + } + + return 0; +} + +static char* getPos(char* data, int32_t bytes, int32_t order, int32_t capacity, int32_t index) { + if (order == TSQL_SO_ASC) { + return data + index * bytes; + } else { + return data + (capacity - index - 1) * bytes; + } +} + +static void setTagsValueInInterpolation(tFilePage** data, char** pTags, tColModel* pModel, int32_t order, int32_t start, + int32_t capacity, int32_t num) { + for (int32_t j = 0, i = start; i < pModel->numOfCols; ++i, ++j) { + char* val1 = getPos(data[i]->data, 
pModel->pFields[i].bytes, order, capacity, num); + assignVal(val1, pTags[j], pModel->pFields[i].bytes, pModel->pFields[i].type); + } +} + +static void doInterpoResultImpl(SInterpolationInfo* pInterpoInfo, int16_t interpoType, tFilePage** data, + tColModel* pModel, int32_t* num, char** srcData, int64_t nInterval, int64_t* defaultVal, + int64_t currentTimestamp, int32_t capacity, int32_t numOfTags, char** pTags, + bool outOfBound) { + char** prevValues = &pInterpoInfo->prevValues; + char** nextValues = &pInterpoInfo->nextValues; + + SPoint point1, point2, point; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pInterpoInfo->order); + + char* val = getPos(data[0]->data, TSDB_KEYSIZE, pInterpoInfo->order, capacity, *num); + *(TSKEY*)val = pInterpoInfo->startTimestamp; + + int32_t numOfValCols = pModel->numOfCols - numOfTags; + + // set the other values + if (interpoType == TSDB_INTERPO_PREV) { + char* pInterpolationData = INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? *prevValues : *nextValues; + if (pInterpolationData != NULL) { + for (int32_t i = 1; i < numOfValCols; ++i) { + char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); + + if (isNull(pInterpolationData + pModel->colOffset[i], pModel->pFields[i].type)) { + setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + } else { + assignVal(val1, pInterpolationData + pModel->colOffset[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + } + } + } else { /* no prev value yet, set the value for null */ + for (int32_t i = 1; i < numOfValCols; ++i) { + char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); + setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + } + } + + setTagsValueInInterpolation(data, pTags, pModel, pInterpoInfo->order, numOfValCols, capacity, *num); + } else if (interpoType == TSDB_INTERPO_LINEAR) { + // TODO : linear interpolation supports NULL value + if (*prevValues != NULL && !outOfBound) { + for (int32_t i = 1; i < numOfValCols; ++i) { + int32_t type = pModel->pFields[i].type; + char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); + + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) { + setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + continue; + } + + point1 = (SPoint){.key = *(TSKEY*)(*prevValues), .val = *prevValues + pModel->colOffset[i]}; + point2 = (SPoint){.key = currentTimestamp, .val = srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes}; + point = (SPoint){.key = pInterpoInfo->startTimestamp, .val = val1}; + taosDoLinearInterpolation(pModel->pFields[i].type, &point1, &point2, &point); + } + + setTagsValueInInterpolation(data, pTags, pModel, pInterpoInfo->order, numOfValCols, capacity, *num); + + } else { + for (int32_t i = 1; i < numOfValCols; ++i) { + char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); + setNull(val1, pModel->pFields[i].type, pModel->pFields[i].bytes); + } + + setTagsValueInInterpolation(data, pTags, pModel, pInterpoInfo->order, numOfValCols, capacity, *num); + } + } else { /* default value interpolation */ + for (int32_t i = 1; i < numOfValCols; ++i) { + char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, capacity, *num); + assignVal(val1, (char*)&defaultVal[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + } + + setTagsValueInInterpolation(data, pTags, pModel, 
pInterpoInfo->order, numOfValCols, capacity, *num); + } + + pInterpoInfo->startTimestamp += (nInterval * step); + pInterpoInfo->numOfCurrentInterpo++; + + (*num) += 1; +} + +int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoType, tFilePage** data, + int32_t numOfRawDataInRows, int32_t outputRows, int64_t nInterval, + int64_t* pPrimaryKeyArray, tColModel* pModel, char** srcData, int64_t* defaultVal, + int32_t* functionIDs, int32_t bufSize) { + int32_t num = 0; + pInterpoInfo->numOfCurrentInterpo = 0; + + char** prevValues = &pInterpoInfo->prevValues; + char** nextValues = &pInterpoInfo->nextValues; + + int32_t numOfTags = pInterpoInfo->numOfTags; + char** pTags = pInterpoInfo->pTags; + + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pInterpoInfo->order); + + if (numOfRawDataInRows == 0) { + /* + * we need to rebuild whole data + * NOTE:we need to keep the last saved data, to satisfy the interpolation + */ + while (num < outputRows) { + doInterpoResultImpl(pInterpoInfo, interpoType, data, pModel, &num, srcData, nInterval, defaultVal, + pInterpoInfo->startTimestamp, bufSize, numOfTags, pTags, true); + } + pInterpoInfo->numOfTotalInterpo += pInterpoInfo->numOfCurrentInterpo; + return outputRows; + + } else { + while (1) { + int64_t currentTimestamp = pPrimaryKeyArray[pInterpoInfo->rowIdx]; + + if ((pInterpoInfo->startTimestamp < currentTimestamp && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || + (pInterpoInfo->startTimestamp > currentTimestamp && !INTERPOL_IS_ASC_INTERPOL(pInterpoInfo))) { + /* set the next value for interpolation */ + if (*nextValues == NULL) { + *nextValues = + calloc(1, pModel->colOffset[pModel->numOfCols - 1] + pModel->pFields[pModel->numOfCols - 1].bytes); + for (int i = 1; i < pModel->numOfCols; i++) { + setNull(*nextValues + pModel->colOffset[i], pModel->pFields[i].type, pModel->pFields[i].bytes); + } + } + + int32_t offset = pInterpoInfo->rowIdx; + for (int32_t tlen = 0, i = 0; i < pModel->numOfCols - numOfTags; ++i) { + memcpy(*nextValues + tlen, srcData[i] + offset * pModel->pFields[i].bytes, pModel->pFields[i].bytes); + tlen += pModel->pFields[i].bytes; + } + } + + while (((pInterpoInfo->startTimestamp < currentTimestamp && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || + (pInterpoInfo->startTimestamp > currentTimestamp && !INTERPOL_IS_ASC_INTERPOL(pInterpoInfo))) && + num < outputRows) { + doInterpoResultImpl(pInterpoInfo, interpoType, data, pModel, &num, srcData, nInterval, defaultVal, + currentTimestamp, bufSize, numOfTags, pTags, false); + } + + /* output buffer is full, abort */ + if ((num == outputRows && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || + (num < 0 && !INTERPOL_IS_ASC_INTERPOL(pInterpoInfo))) { + pInterpoInfo->numOfTotalInterpo += pInterpoInfo->numOfCurrentInterpo; + return outputRows; + } + + if (pInterpoInfo->startTimestamp == currentTimestamp) { + if (*prevValues == NULL) { + *prevValues = + calloc(1, pModel->colOffset[pModel->numOfCols - 1] + pModel->pFields[pModel->numOfCols - 1].bytes); + for (int i = 1; i < pModel->numOfCols; i++) { + setNull(*prevValues + pModel->colOffset[i], pModel->pFields[i].type, pModel->pFields[i].bytes); + } + } + + // assign rows to dst buffer + int32_t i = 0; + for (int32_t tlen = 0; i < pModel->numOfCols - numOfTags; ++i) { + char* val1 = getPos(data[i]->data, pModel->pFields[i].bytes, pInterpoInfo->order, bufSize, num); + + if (i == 0 || + (functionIDs[i] != TSDB_FUNC_COUNT && + !isNull(srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes, pModel->pFields[i].type)) || + (functionIDs[i] == 
TSDB_FUNC_COUNT && + *(int64_t*)(srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes) != 0)) { + assignVal(val1, srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes, pModel->pFields[i].bytes, + pModel->pFields[i].type); + memcpy(*prevValues + tlen, srcData[i] + pInterpoInfo->rowIdx * pModel->pFields[i].bytes, + pModel->pFields[i].bytes); + } else { // i > 0 and isNULL, do interpolation + if (interpoType == TSDB_INTERPO_PREV) { + assignVal(val1, *prevValues + pModel->colOffset[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + } else if (interpoType == TSDB_INTERPO_LINEAR) { + // TODO: + } else { + assignVal(val1, (char*)&defaultVal[i], pModel->pFields[i].bytes, pModel->pFields[i].type); + } + } + tlen += pModel->pFields[i].bytes; + } + + /* set the tag value for final result */ + setTagsValueInInterpolation(data, pTags, pModel, pInterpoInfo->order, pModel->numOfCols - numOfTags, bufSize, + num); + } + + pInterpoInfo->startTimestamp += (nInterval * step); + pInterpoInfo->rowIdx += step; + num += 1; + + if ((pInterpoInfo->rowIdx >= pInterpoInfo->numOfRawDataInRows && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || + (pInterpoInfo->rowIdx < 0 && !INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) || num >= outputRows) { + if (pInterpoInfo->rowIdx >= pInterpoInfo->numOfRawDataInRows || pInterpoInfo->rowIdx < 0) { + pInterpoInfo->rowIdx = -1; + pInterpoInfo->numOfRawDataInRows = 0; + + /* the raw data block is exhausted, next value does not exists */ + tfree(*nextValues); + } + + pInterpoInfo->numOfTotalInterpo += pInterpoInfo->numOfCurrentInterpo; + return num; + } + } + } +} diff --git a/src/util/src/tlinux.c b/src/util/src/tlinux.c new file mode 100644 index 000000000000..7d0305bb4f11 --- /dev/null +++ b/src/util/src/tlinux.c @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tglobalcfg.h" +#include "tlog.h" +#include "tsdb.h" +#include "tutil.h" + +char configDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char tsDirectory[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char dataDir[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char logDir[TSDB_FILENAME_LEN] = "/var/log/taos"; +char scriptDir[TSDB_FILENAME_LEN] = "/etc/taos"; + +int64_t str2int64(char *str) { + char *endptr = NULL; + return strtoll(str, &endptr, 10); +} + +/* + to make taosMsleep work, + signal SIGALRM shall be blocked in the calling thread, + + sigset_t set; + sigemptyset(&set); + sigaddset(&set, SIGALRM); + pthread_sigmask(SIG_BLOCK, &set, NULL); +*/ +void taosMsleep(int mseconds) { + struct timeval timeout; + int seconds, useconds; + + seconds = mseconds / 1000; + useconds = (mseconds % 1000) * 1000; + timeout.tv_sec = seconds; + timeout.tv_usec = useconds; + + /* sigset_t set; */ + /* sigemptyset(&set); */ + /* sigaddset(&set, SIGALRM); */ + /* pthread_sigmask(SIG_BLOCK, &set, NULL); */ + + select(0, NULL, NULL, NULL, &timeout); + + /* pthread_sigmask(SIG_UNBLOCK, &set, NULL); */ +} + +bool taosCheckPthreadValid(pthread_t thread) { return thread != 0; } + +void taosResetPthread(pthread_t *thread) { *thread = 0; } + +int64_t taosGetPthreadId() { return (int64_t)pthread_self(); } + +/* +* Function to get the private ip address of current machine. If get IP +* successfully, return 0, else, return -1. The return values is ip. +* +* Use: +* if (taosGetPrivateIp(ip) != 0) { +* perror("Fail to get private IP address\n"); +* exit(EXIT_FAILURE); +* } +*/ +int taosGetPrivateIp(char *const ip) { + bool hasLoCard = false; + + struct ifaddrs *ifaddr, *ifa; + int family, s; + char host[NI_MAXHOST]; + + if (getifaddrs(&ifaddr) == -1) { + return -1; + } + + /* Walk through linked list, maintaining head pointer so we can free list later */ + int flag = 0; + for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) { + if (ifa->ifa_addr == NULL) continue; + + family = ifa->ifa_addr->sa_family; + if (strcmp("lo", ifa->ifa_name) == 0) { + hasLoCard = true; + continue; + } + + if (family == AF_INET) { + /* printf("%-8s", ifa->ifa_name); */ + s = getnameinfo(ifa->ifa_addr, (family == AF_INET) ? 
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6), + host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST); + if (s != 0) { + freeifaddrs(ifaddr); + return -1; + } + + strcpy(ip, host); + flag = 1; + break; + } + } + + freeifaddrs(ifaddr); + if (flag) { + return 0; + } else { + if (hasLoCard) { + pPrint("no net card was found, use lo:127.0.0.1 as default"); + strcpy(ip, "127.0.0.1"); + return 0; + } + return -1; + } +} + +int taosSetNonblocking(int sock, int on) { + int flags = 0; + if ((flags = fcntl(sock, F_GETFL, 0)) < 0) { + pError("fcntl(F_GETFL) error: %d (%s)\n", errno, strerror(errno)); + return 1; + } + + if (on) + flags |= O_NONBLOCK; + else + flags &= ~O_NONBLOCK; + + if ((flags = fcntl(sock, F_SETFL, flags)) < 0) { + pError("fcntl(F_SETFL) error: %d (%s)\n", errno, strerror(errno)); + return 1; + } + + return 0; +} + +int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optlen) { + return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); +} + +int taosOpenUDClientSocket(char *ip, short port) { + int sockFd = 0; + struct sockaddr_un serverAddr; + int ret; + char name[128]; + sprintf(name, "%s.%d", ip, port); + + sockFd = socket(AF_UNIX, SOCK_STREAM, 0); + + if (sockFd < 0) { + pError("failed to open the UD socket:%s, reason:%s", name, strerror(errno)); + return -1; + } + + memset((char *)&serverAddr, 0, sizeof(serverAddr)); + serverAddr.sun_family = AF_UNIX; + strcpy(serverAddr.sun_path + 1, name); + + ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); + + if (ret != 0) { + pError("failed to connect UD socket, name:%d, reason: %s", name, strerror(errno)); + sockFd = -1; + } + + return sockFd; +} + +int taosOpenUDServerSocket(char *ip, short port) { + struct sockaddr_un serverAdd; + int sockFd; + char name[128]; + + pTrace("open ud socket:%s", name); + // if (tsAllowLocalhost) ip = "0.0.0.0"; + sprintf(name, "%s.%d", ip, port); + + bzero((char *)&serverAdd, sizeof(serverAdd)); + serverAdd.sun_family = AF_UNIX; + strcpy(serverAdd.sun_path + 1, name); + unlink(name); + + if ((sockFd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { + pError("failed to open UD socket:%s, reason:%s", name, strerror(errno)); + return -1; + } + + /* bind socket to server address */ + if (bind(sockFd, (struct sockaddr *)&serverAdd, sizeof(serverAdd)) < 0) { + pError("bind socket:%s failed, reason:%s", name, strerror(errno)); + tclose(sockFd); + return -1; + } + + if (listen(sockFd, 10) < 0) { + pError("listen socket:%s failed, reason:%s", name, strerror(errno)); + return -1; + } + + return sockFd; +} + +int taosInitTimer(void *(*callback)(void *), int ms) { + /******************************************************** + * Create SIGALRM loop thread + ********************************************************/ + pthread_t thread; + pthread_attr_t tattr; + if (pthread_attr_init(&tattr)) { + return -1; + } + + if (pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED)) { + return -1; + } + + int *tms = (int *)malloc(sizeof(int)); + *tms = ms; + if (pthread_create(&thread, &tattr, callback, (void *)tms)) { + return -1; + } + + if (pthread_attr_destroy(&tattr)) { + return -1; + } + + return 0; +} diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c new file mode 100644 index 000000000000..64679a3fa36a --- /dev/null +++ b/src/util/src/tlog.c @@ -0,0 +1,550 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tlog.h" +#include "tutil.h" + +#define MAX_LOGLINE_SIZE 1000 +#define LOG_FILE_NAME_LEN 300 +#define TSDB_DEFAULT_LOG_BUF_SIZE (64 * 1024) // 10K +#define TSDB_MIN_LOG_BUF_SIZE 1024 // 1K +#define TSDB_MAX_LOG_BUF_SIZE (1024 * 1024) // 1M +#define TSDB_DEFAULT_LOG_BUF_UNIT 1024 // 1K + +typedef struct { + char * buffer; + int buffStart; + int buffEnd; + int buffSize; + int fd; + int stop; + pthread_t asyncThread; + pthread_mutex_t buffMutex; + sem_t buffNotEmpty; +} SLogBuff; + +int uDebugFlag = 131; // all the messages +int tsAsyncLog = 1; + +static SLogBuff *logHandle; +static int taosLogFileNum = 1; +static int taosLogMaxLines = 0; +static int taosLogLines = 0; +static char taosLogName[LOG_FILE_NAME_LEN]; +static int taosLogFlag = 0; +// static int logFd = -1; +static int openInProgress = 0; +static pthread_mutex_t logMutex; +void (*taosLogFp)(int level, const char *const format, ...) = NULL; +void (*taosLogSqlFp)(char *sql) = NULL; +void (*taosLogAcctFp)(char *acctId, int64_t currentPointsPerSecond, int64_t maxPointsPerSecond, int64_t totalTimeSeries, + int64_t maxTimeSeries, int64_t totalStorage, int64_t maxStorage, int64_t totalQueryTime, + int64_t maxQueryTime, int64_t totalInbound, int64_t maxInbound, int64_t totalOutbound, + int64_t maxOutbound, int64_t totalDbs, int64_t maxDbs, int64_t totalUsers, int64_t maxUsers, + int64_t totalStreams, int64_t maxStreams, int64_t totalConns, int64_t maxConns, + int8_t accessState) = NULL; +void *taosAsyncOutputLog(void *param); +int taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int msgLen); +SLogBuff *taosLogBuffNew(int bufSize); +void taosLogBuffDestroy(SLogBuff *tLogBuff); + +int taosStartLog() { + pthread_attr_t threadAttr; + + pthread_attr_init(&threadAttr); + + if (pthread_create(&(logHandle->asyncThread), &threadAttr, taosAsyncOutputLog, logHandle) != 0) { + return -1; + } + + pthread_attr_destroy(&threadAttr); + + return 0; +} + +int taosInitLog(char *logName, int numOfLogLines, int maxFiles) { + logHandle = taosLogBuffNew(TSDB_DEFAULT_LOG_BUF_SIZE); + if (logHandle == NULL) return -1; + + if (taosOpenLogFileWithMaxLines(logName, numOfLogLines, maxFiles) < 0) return -1; + + if (taosStartLog() < 0) return -1; + return 0; +} + +void taosStopLog() { + if (logHandle) logHandle->stop = 1; +} + +void taosCloseLogger() { + taosStopLog(); + sem_post(&(logHandle->buffNotEmpty)); + pthread_join(logHandle->asyncThread, NULL); + // In case that other threads still use log resources causing invalid write in + // valgrind, we comment two lines below. 
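+ // (the log fd and the async buffer are instead reclaimed by the OS at process exit)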
+ // taosLogBuffDestroy(logHandle); + // taosCloseLog(); +} + +void taosCloseLogByFd(int oldFd); +bool taosLockFile(int fd) { + if (fd < 0) return false; + + if (taosLogFileNum > 1) { + int ret = flock(fd, LOCK_EX | LOCK_NB); + if (ret == 0) { + return true; + } + } + + return false; +} + +void taosUnLockFile(int fd) { + if (fd < 0) return; + + if (taosLogFileNum > 1) { + flock(fd, LOCK_UN | LOCK_NB); + } +} + +void *taosThreadToOpenNewFile(void *param) { + char name[LOG_FILE_NAME_LEN]; + + taosLogFlag ^= 1; + taosLogLines = 0; + sprintf(name, "%s.%d", taosLogName, taosLogFlag); + + umask(0); + + int fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + taosLockFile(fd); + lseek(fd, 0, SEEK_SET); + + int oldFd = logHandle->fd; + logHandle->fd = fd; + taosLogLines = 0; + openInProgress = 0; + pPrint("new log file is opened!!!"); + + taosCloseLogByFd(oldFd); + return NULL; +} + +int taosOpenNewLogFile() { + pthread_mutex_lock(&logMutex); + + if (taosLogLines > taosLogMaxLines && openInProgress == 0) { + openInProgress = 1; + + pPrint("open new log file ......"); + pthread_t thread; + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + + pthread_create(&thread, &attr, taosThreadToOpenNewFile, NULL); + pthread_attr_destroy(&attr); + } + + pthread_mutex_unlock(&logMutex); + + return 0; +} + +void taosResetLogFile() { + char lastName[LOG_FILE_NAME_LEN]; + sprintf(lastName, "%s.%d", taosLogName, taosLogFlag); + + // force create a new log file + taosLogLines = taosLogMaxLines + 10; + + taosOpenNewLogFile(); + remove(lastName); + + pPrint("=================================="); + pPrint(" reset log file "); +} + +bool taosCheckFileIsOpen(char *logFileName) { + int exist = access(logFileName, F_OK); + if (exist != 0) { + return false; + } + + int fd = open(logFileName, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + if (fd < 0) { + printf("failed to open log file:%s, reason:%s\n", logFileName, strerror(errno)); + return true; + } + + if (taosLockFile(fd)) { + taosUnLockFile(fd); + tclose(fd); + return false; + } else { + tclose(fd); + return true; + } +} + +void taosGetLogFileName(char *fn) { + if (taosLogFileNum > 1) { + for (int i = 0; i < taosLogFileNum; i++) { + char fileName[LOG_FILE_NAME_LEN]; + + sprintf(fileName, "%s%d.0", fn, i); + bool file1open = taosCheckFileIsOpen(fileName); + + sprintf(fileName, "%s%d.1", fn, i); + bool file2open = taosCheckFileIsOpen(fileName); + + if (!file1open && !file2open) { + sprintf(taosLogName, "%s%d", fn, i); + return; + } + } + } + + strcpy(taosLogName, fn); +} + +int taosOpenLogFileWithMaxLines(char *fn, int maxLines, int maxFileNum) { + char name[LOG_FILE_NAME_LEN] = "\0"; + struct stat logstat0, logstat1; + int size; + + taosLogMaxLines = maxLines; + taosLogFileNum = maxFileNum; + taosGetLogFileName(fn); + + strcpy(name, fn); + strcat(name, ".0"); + + // if none of the log files exist, open 0, if both exists, open the old one + if (stat(name, &logstat0) < 0) { + taosLogFlag = 0; + } else { + strcpy(name, fn); + strcat(name, ".1"); + if (stat(name, &logstat1) < 0) { + taosLogFlag = 1; + } else { + taosLogFlag = (logstat0.st_mtime > logstat1.st_mtime) ? 
0 : 1; + } + } + + sprintf(name, "%s.%d", taosLogName, taosLogFlag); + pthread_mutex_init(&logMutex, NULL); + + umask(0); + logHandle->fd = open(name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + + if (logHandle->fd < 0) { + printf("failed to open log file:%s, reason:%s\n", name, strerror(errno)); + return -1; + } + taosLockFile(logHandle->fd); + + // only an estimate for number of lines + struct stat filestat; + fstat(logHandle->fd, &filestat); + size = (int)filestat.st_size; + taosLogLines = size / 60; + + lseek(logHandle->fd, 0, SEEK_END); + + sprintf(name, "==================================================\n"); + write(logHandle->fd, name, (uint32_t)strlen(name)); + sprintf(name, " new log file \n"); + write(logHandle->fd, name, (uint32_t)strlen(name)); + sprintf(name, "==================================================\n"); + write(logHandle->fd, name, (uint32_t)strlen(name)); + + return 0; +} + +char *tprefix(char *prefix) { + struct tm Tm, *ptm; + struct timeval timeSecs; + time_t curTime; + + gettimeofday(&timeSecs, NULL); + curTime = timeSecs.tv_sec; + ptm = localtime_r(&curTime, &Tm); + + sprintf(prefix, "%02d/%02d %02d:%02d:%02d.%06d 0x%lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + return prefix; +} + +void tprintf(const char *const flags, int dflag, const char *const format, ...) { + va_list argpointer; + char buffer[MAX_LOGLINE_SIZE + 10] = {0}; + int len; + struct tm Tm, *ptm; + struct timeval timeSecs; + time_t curTime; + + gettimeofday(&timeSecs, NULL); + curTime = timeSecs.tv_sec; + ptm = localtime_r(&curTime, &Tm); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + len += sprintf(buffer + len, "%s", flags); + + va_start(argpointer, format); + len += vsnprintf(buffer + len, 900, format, argpointer); + va_end(argpointer); + + if (len > MAX_LOGLINE_SIZE) len = MAX_LOGLINE_SIZE; + + buffer[len++] = '\n'; + buffer[len] = 0; + + if ((dflag & DEBUG_FILE) && logHandle && logHandle->fd >= 0) { + if (tsAsyncLog) { + taosPushLogBuffer(logHandle, buffer, len); + } else { + write(logHandle->fd, buffer, len); + } + + if (taosLogMaxLines > 0) { + __sync_fetch_and_add(&taosLogLines, 1); + + if ((taosLogLines > taosLogMaxLines) && (openInProgress == 0)) taosOpenNewLogFile(); + } + } + + if (dflag & DEBUG_SCREEN) write(1, buffer, (unsigned int)len); +} + +void taosDumpData(unsigned char *msg, int len) { + char temp[256]; + int i, pos = 0, c = 0; + + for (i = 0; i < len; ++i) { + sprintf(temp + pos, "%02x ", msg[i]); + c++; + pos += 3; + if (c >= 16) { + temp[pos++] = '\n'; + write(logHandle->fd, temp, (unsigned int)pos); + c = 0; + pos = 0; + } + } + + temp[pos++] = '\n'; + + write(logHandle->fd, temp, (unsigned int)pos); + + return; +} + +void taosPrintLongString(const char *const flags, int dflag, const char *const format, ...) 
{ + va_list argpointer; + char buffer[65 * 1024 + 10]; + int len; + struct tm Tm, *ptm; + struct timeval timeSecs; + time_t curTime; + + gettimeofday(&timeSecs, NULL); + curTime = timeSecs.tv_sec; + ptm = localtime_r(&curTime, &Tm); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); + len += sprintf(buffer + len, "%s", flags); + + va_start(argpointer, format); + len += vsnprintf(buffer + len, 64 * 1024, format, argpointer); + va_end(argpointer); + + if (len > 64 * 1024) len = 64 * 1024; + + buffer[len++] = '\n'; + buffer[len] = 0; + + if ((dflag & DEBUG_FILE) && logHandle && logHandle->fd >= 0) { + taosPushLogBuffer(logHandle, buffer, len); + + if (taosLogMaxLines > 0) { + __sync_fetch_and_add(&taosLogLines, 1); + + if ((taosLogLines > taosLogMaxLines) && (openInProgress == 0)) taosOpenNewLogFile(); + } + } + + if (dflag & DEBUG_SCREEN) write(1, buffer, (unsigned int)len); +} + +void taosCloseLog() { taosCloseLogByFd(logHandle->fd); } + +void taosCloseLogByFd(int fd) { + if (fd >= 0) { + taosUnLockFile(fd); + tclose(fd); + } +} + +#define LOG_BUF_BUFFER(x) ((x)->buffer) +#define LOG_BUF_START(x) ((x)->buffStart) +#define LOG_BUF_END(x) ((x)->buffEnd) +#define LOG_BUF_SIZE(x) ((x)->buffSize) +#define LOG_BUF_MUTEX(x) ((x)->buffMutex) + +SLogBuff *taosLogBuffNew(int bufSize) { + SLogBuff *tLogBuff = NULL; + + if (bufSize < TSDB_MIN_LOG_BUF_SIZE || bufSize > TSDB_MAX_LOG_BUF_SIZE) return NULL; + + tLogBuff = calloc(1, sizeof(SLogBuff)); + if (tLogBuff == NULL) return NULL; + + LOG_BUF_BUFFER(tLogBuff) = malloc(bufSize); + if (LOG_BUF_BUFFER(tLogBuff) == NULL) goto _err; + + LOG_BUF_START(tLogBuff) = LOG_BUF_END(tLogBuff) = 0; + LOG_BUF_SIZE(tLogBuff) = bufSize; + tLogBuff->stop = 0; + + if (pthread_mutex_init(&LOG_BUF_MUTEX(tLogBuff), NULL) < 0) goto _err; + sem_init(&(tLogBuff->buffNotEmpty), 0, 0); + + return tLogBuff; + +_err: + tfree(LOG_BUF_BUFFER(tLogBuff)); + tfree(tLogBuff); + return NULL; +} + +void taosLogBuffDestroy(SLogBuff *tLogBuff) { + sem_destroy(&(tLogBuff->buffNotEmpty)); + pthread_mutex_destroy(&(tLogBuff->buffMutex)); + free(tLogBuff->buffer); + tfree(tLogBuff); +} + +int taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int msgLen) { + int start = 0; + int end = 0; + int remainSize = 0; + + if (tLogBuff == NULL || tLogBuff->stop) return -1; + + pthread_mutex_lock(&LOG_BUF_MUTEX(tLogBuff)); + start = LOG_BUF_START(tLogBuff); + end = LOG_BUF_END(tLogBuff); + + remainSize = (start > end) ? 
(end - start - 1) : (start + LOG_BUF_SIZE(tLogBuff) - end - 1); + + if (remainSize <= msgLen) { + pthread_mutex_unlock(&LOG_BUF_MUTEX(tLogBuff)); + return -1; + } + + if (start > end) { + memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, msgLen); + } else { + if (LOG_BUF_SIZE(tLogBuff) - end < msgLen) { + memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, LOG_BUF_SIZE(tLogBuff) - end); + memcpy(LOG_BUF_BUFFER(tLogBuff), msg + LOG_BUF_SIZE(tLogBuff) - end, msgLen - LOG_BUF_SIZE(tLogBuff) + end); + } else { + memcpy(LOG_BUF_BUFFER(tLogBuff) + end, msg, msgLen); + } + } + LOG_BUF_END(tLogBuff) = (LOG_BUF_END(tLogBuff) + msgLen) % LOG_BUF_SIZE(tLogBuff); + + // TODO : put string in the buffer + + sem_post(&(tLogBuff->buffNotEmpty)); + + pthread_mutex_unlock(&LOG_BUF_MUTEX(tLogBuff)); + + return 0; +} + +int taosPollLogBuffer(SLogBuff *tLogBuff, char *buf, int bufSize) { + int start = LOG_BUF_START(tLogBuff); + int end = LOG_BUF_END(tLogBuff); + int pollSize = 0; + + if (start == end) { + return 0; + } else if (start < end) { + pollSize = MIN(end - start, bufSize); + + memcpy(buf, LOG_BUF_BUFFER(tLogBuff) + start, pollSize); + return pollSize; + } else { + pollSize = MIN(end + LOG_BUF_SIZE(tLogBuff) - start, bufSize); + if (pollSize > LOG_BUF_SIZE(tLogBuff) - start) { + int tsize = LOG_BUF_SIZE(tLogBuff) - start; + memcpy(buf, LOG_BUF_BUFFER(tLogBuff) + start, tsize); + memcpy(buf + tsize, LOG_BUF_BUFFER(tLogBuff), pollSize - tsize); + + } else { + memcpy(buf, LOG_BUF_BUFFER(tLogBuff) + start, pollSize); + } + return pollSize; + } +} + +void *taosAsyncOutputLog(void *param) { + SLogBuff *tLogBuff = (SLogBuff *)param; + int log_size = 0; + + char tempBuffer[TSDB_DEFAULT_LOG_BUF_UNIT]; + + while (1) { + sem_wait(&(tLogBuff->buffNotEmpty)); + + // Polling the buffer + while (1) { + log_size = taosPollLogBuffer(tLogBuff, tempBuffer, TSDB_DEFAULT_LOG_BUF_UNIT); + if (log_size) { + write(tLogBuff->fd, tempBuffer, log_size); + LOG_BUF_START(tLogBuff) = (LOG_BUF_START(tLogBuff) + log_size) % LOG_BUF_SIZE(tLogBuff); + } else { + break; + } + } + + if (tLogBuff->stop) break; + } + + return NULL; +} diff --git a/src/util/src/tlosertree.c b/src/util/src/tlosertree.c new file mode 100644 index 000000000000..7da03347b10c --- /dev/null +++ b/src/util/src/tlosertree.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include "taosmsg.h" +#include "tlog.h" +#include "tlosertree.h" + +// set initial value for loser tree +void tLoserTreeInit(SLoserTreeInfo* pTree) { + assert((pTree->totalEntries & 0x01) == 0 && (pTree->numOfEntries << 1 == pTree->totalEntries)); + + for (int32_t i = 0; i < pTree->totalEntries; ++i) { + if (i < pTree->numOfEntries) { + pTree->pNode[i].index = -1; + } else { + pTree->pNode[i].index = i - pTree->numOfEntries; + } + } +} + +/* + * display whole loser tree on screen for debug purpose only. 
+ */ +void tLoserTreeDisplay(SLoserTreeInfo* pTree) { + printf("the value of loser tree:\t"); + for (int32_t i = 0; i < pTree->totalEntries; ++i) printf("%d\t", pTree->pNode[i].index); + printf("\n"); +} + +uint8_t tLoserTreeCreate(SLoserTreeInfo** pTree, int32_t numOfEntries, void* param, __merge_compare_fn_t compareFn) { + int32_t totalEntries = numOfEntries << 1; + + *pTree = (SLoserTreeInfo*)calloc(1, sizeof(SLoserTreeInfo) + sizeof(SLoserTreeNode) * totalEntries); + if ((*pTree) == NULL) { + pError("allocate memory for losertree failed. out of memory"); + return TSDB_CODE_CLI_OUT_OF_MEMORY; + } + + (*pTree)->pNode = (SLoserTreeNode*)(((char*)(*pTree)) + sizeof(SLoserTreeInfo)); + + (*pTree)->numOfEntries = numOfEntries; + (*pTree)->totalEntries = totalEntries; + (*pTree)->param = param; + (*pTree)->comparaFn = compareFn; + + // set initial value for loser tree + tLoserTreeInit(*pTree); + +#ifdef _DEBUG_VIEW + printf("the initial value of loser tree:\n"); + tLoserTreeDisplay(*pTree); +#endif + + for (int32_t i = totalEntries - 1; i >= numOfEntries; i--) { + tLoserTreeAdjust(*pTree, i); + } + +#if defined(_DEBUG_VIEW) + printf("after adjust:\n"); + tLoserTreeDisplay(*pTree); + printf("initialize local reducer completed!\n"); +#endif + + return TSDB_CODE_SUCCESS; +} + +void tLoserTreeAdjust(SLoserTreeInfo* pTree, int32_t idx) { + assert(idx <= pTree->totalEntries - 1 && idx >= pTree->numOfEntries && pTree->totalEntries >= 2); + + if (pTree->totalEntries == 2) { + pTree->pNode[0].index = 0; + pTree->pNode[1].index = 0; + return; + } + + int32_t parentId = idx >> 1; + SLoserTreeNode kLeaf = pTree->pNode[idx]; + + while (parentId > 0) { + if (pTree->pNode[parentId].index == -1) { + pTree->pNode[parentId] = kLeaf; + return; + } + + int32_t ret = pTree->comparaFn(&pTree->pNode[parentId], &kLeaf, pTree->param); + if (ret < 0) { + SLoserTreeNode t = pTree->pNode[parentId]; + pTree->pNode[parentId] = kLeaf; + kLeaf = t; + } + + parentId = parentId >> 1; + } + + if (memcmp(&kLeaf, &pTree->pNode[1], sizeof(kLeaf)) != 0) { + // winner cannot be identical to the loser, which is pTreeNode[1] + pTree->pNode[0] = kLeaf; + } +} + +void tLoserTreeRebuild(SLoserTreeInfo* pTree) { + assert((pTree->totalEntries & 0x1) == 0); + + tLoserTreeInit(pTree); + for (int32_t i = pTree->totalEntries - 1; i >= pTree->numOfEntries; i--) { + tLoserTreeAdjust(pTree, i); + } +} diff --git a/src/util/src/tmd5.c b/src/util/src/tmd5.c new file mode 100644 index 000000000000..2ccff1e22692 --- /dev/null +++ b/src/util/src/tmd5.c @@ -0,0 +1,269 @@ +/* + *********************************************************************** + ** Message-digest routines: ** + ** To form the message digest for a message M ** + ** (1) Initialize a context buffer mdContext using MD5Init ** + ** (2) Call MD5Update on mdContext and M ** + ** (3) Call MD5Final on mdContext ** + ** The message digest is now in mdContext->digest[0...15] ** + *********************************************************************** + */ + +/* + *********************************************************************** + ** Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. ** + ** ** + ** License to copy and use this software is granted provided that ** + ** it is identified as the "RSA Data Security, Inc. MD5 Message- ** + ** Digest Algorithm" in all material mentioning or referencing this ** + ** software or this function. 
** + ** ** + ** License is also granted to make and use derivative works ** + ** provided that such works are identified as "derived from the RSA ** + ** Data Security, Inc. MD5 Message-Digest Algorithm" in all ** + ** material mentioning or referencing the derived work. ** + ** ** + ** RSA Data Security, Inc. makes no representations concerning ** + ** either the merchantability of this software or the suitability ** + ** of this software for any particular purpose. It is provided "as ** + ** is" without express or implied warranty of any kind. ** + ** ** + ** These notices must be retained in any copies of any part of this ** + ** documentation and/or software. ** + *********************************************************************** + */ + +#include "tmd5.h" +#include +#include +#include +#include "tsdb.h" + +/* forward declaration */ +static void Transform(uint32_t *buf, uint32_t *in); + +static uint8_t PADDING[64] = {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +/* F, G, H and I are basic MD5 functions */ +#define F(x, y, z) (((x) & (y)) | ((~x) & (z))) +#define G(x, y, z) (((x) & (z)) | ((y) & (~z))) +#define H(x, y, z) ((x) ^ (y) ^ (z)) +#define I(x, y, z) ((y) ^ ((x) | (~z))) + +/* ROTATE_LEFT rotates x left n bits */ +#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) + +/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */ +/* Rotation is separate from addition to prevent recomputation */ +#define FF(a, b, c, d, x, s, ac) \ + { \ + (a) += F((b), (c), (d)) + (x) + (uint32_t)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ + } +#define GG(a, b, c, d, x, s, ac) \ + { \ + (a) += G((b), (c), (d)) + (x) + (uint32_t)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ + } +#define HH(a, b, c, d, x, s, ac) \ + { \ + (a) += H((b), (c), (d)) + (x) + (uint32_t)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ + } +#define II(a, b, c, d, x, s, ac) \ + { \ + (a) += I((b), (c), (d)) + (x) + (uint32_t)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ + } + +/* The routine MD5Init initializes the message-digest context + mdContext. All fields are set to zero. + */ +void MD5Init(MD5_CTX *mdContext) { + mdContext->i[0] = mdContext->i[1] = (uint32_t)0; + + /* Load magic initialization constants. + */ + mdContext->buf[0] = (uint32_t)0x67452301; + mdContext->buf[1] = (uint32_t)0xefcdab89; + mdContext->buf[2] = (uint32_t)0x98badcfe; + mdContext->buf[3] = (uint32_t)0x10325476; +} + +/* The routine MD5Update updates the message-digest context to +account for the presence of each of the characters inBuf[0..inLen-1] +in the message whose digest is being computed. 
+*/ +void MD5Update(MD5_CTX *mdContext, uint8_t *inBuf, unsigned int inLen) { + uint32_t in[16]; + int mdi; + unsigned int i, ii; + + /* compute number of bytes mod 64 */ + mdi = (int)((mdContext->i[0] >> 3) & 0x3F); + + /* update number of bits */ + if ((mdContext->i[0] + ((uint32_t)inLen << 3)) < mdContext->i[0]) mdContext->i[1]++; + mdContext->i[0] += ((uint32_t)inLen << 3); + mdContext->i[1] += ((uint32_t)inLen >> 29); + + while (inLen--) { + /* add new character to buffer, increment mdi */ + mdContext->in[mdi++] = *inBuf++; + + /* transform if necessary */ + if (mdi == 0x40) { + for (i = 0, ii = 0; i < 16; i++, ii += 4) + in[i] = (((uint32_t)mdContext->in[ii + 3]) << 24) | (((uint32_t)mdContext->in[ii + 2]) << 16) | + (((uint32_t)mdContext->in[ii + 1]) << 8) | ((uint32_t)mdContext->in[ii]); + Transform(mdContext->buf, in); + mdi = 0; + } + } +} + +/* The routine MD5Final terminates the message-digest computation and +ends with the desired message digest in mdContext->digest[0...15]. +*/ +void MD5Final(MD5_CTX *mdContext) { + uint32_t in[16]; + int mdi; + unsigned int i, ii; + unsigned int padLen; + + /* save number of bits */ + in[14] = mdContext->i[0]; + in[15] = mdContext->i[1]; + + /* compute number of bytes mod 64 */ + mdi = (int)((mdContext->i[0] >> 3) & 0x3F); + + /* pad out to 56 mod 64 */ + padLen = (mdi < 56) ? (56 - mdi) : (120 - mdi); + MD5Update(mdContext, PADDING, padLen); + + /* append length in bits and transform */ + for (i = 0, ii = 0; i < 14; i++, ii += 4) + in[i] = (((uint32_t)mdContext->in[ii + 3]) << 24) | (((uint32_t)mdContext->in[ii + 2]) << 16) | + (((uint32_t)mdContext->in[ii + 1]) << 8) | ((uint32_t)mdContext->in[ii]); + Transform(mdContext->buf, in); + + /* store buffer in digest */ + for (i = 0, ii = 0; i < 4; i++, ii += 4) { + mdContext->digest[ii] = (uint8_t)(mdContext->buf[i] & 0xFF); + mdContext->digest[ii + 1] = (uint8_t)((mdContext->buf[i] >> 8) & 0xFF); + mdContext->digest[ii + 2] = (uint8_t)((mdContext->buf[i] >> 16) & 0xFF); + mdContext->digest[ii + 3] = (uint8_t)((mdContext->buf[i] >> 24) & 0xFF); + } +} + +/* Basic MD5 step. Transforms buf based on in. 
+ */ +static void Transform(uint32_t *buf, uint32_t *in) { + uint32_t a = buf[0], b = buf[1], c = buf[2], d = buf[3]; + +/* Round 1 */ +#define S11 7 +#define S12 12 +#define S13 17 +#define S14 22 + + FF(a, b, c, d, in[0], S11, 3614090360U); /* 1 */ + FF(d, a, b, c, in[1], S12, 3905402710U); /* 2 */ + FF(c, d, a, b, in[2], S13, 606105819U); /* 3 */ + FF(b, c, d, a, in[3], S14, 3250441966U); /* 4 */ + FF(a, b, c, d, in[4], S11, 4118548399U); /* 5 */ + FF(d, a, b, c, in[5], S12, 1200080426U); /* 6 */ + FF(c, d, a, b, in[6], S13, 2821735955U); /* 7 */ + FF(b, c, d, a, in[7], S14, 4249261313U); /* 8 */ + FF(a, b, c, d, in[8], S11, 1770035416U); /* 9 */ + FF(d, a, b, c, in[9], S12, 2336552879U); /* 10 */ + FF(c, d, a, b, in[10], S13, 4294925233U); /* 11 */ + FF(b, c, d, a, in[11], S14, 2304563134U); /* 12 */ + FF(a, b, c, d, in[12], S11, 1804603682U); /* 13 */ + FF(d, a, b, c, in[13], S12, 4254626195U); /* 14 */ + FF(c, d, a, b, in[14], S13, 2792965006U); /* 15 */ + FF(b, c, d, a, in[15], S14, 1236535329U); /* 16 */ + +/* Round 2 */ +#define S21 5 +#define S22 9 +#define S23 14 +#define S24 20 + + GG(a, b, c, d, in[1], S21, 4129170786U); /* 17 */ + GG(d, a, b, c, in[6], S22, 3225465664U); /* 18 */ + GG(c, d, a, b, in[11], S23, 643717713U); /* 19 */ + GG(b, c, d, a, in[0], S24, 3921069994U); /* 20 */ + GG(a, b, c, d, in[5], S21, 3593408605U); /* 21 */ + GG(d, a, b, c, in[10], S22, 38016083U); /* 22 */ + GG(c, d, a, b, in[15], S23, 3634488961U); /* 23 */ + GG(b, c, d, a, in[4], S24, 3889429448U); /* 24 */ + GG(a, b, c, d, in[9], S21, 568446438U); /* 25 */ + GG(d, a, b, c, in[14], S22, 3275163606U); /* 26 */ + GG(c, d, a, b, in[3], S23, 4107603335U); /* 27 */ + GG(b, c, d, a, in[8], S24, 1163531501U); /* 28 */ + GG(a, b, c, d, in[13], S21, 2850285829U); /* 29 */ + GG(d, a, b, c, in[2], S22, 4243563512U); /* 30 */ + GG(c, d, a, b, in[7], S23, 1735328473U); /* 31 */ + GG(b, c, d, a, in[12], S24, 2368359562U); /* 32 */ + +/* Round 3 */ +#define S31 4 +#define S32 11 +#define S33 16 +#define S34 23 + + HH(a, b, c, d, in[5], S31, 4294588738U); /* 33 */ + HH(d, a, b, c, in[8], S32, 2272392833U); /* 34 */ + HH(c, d, a, b, in[11], S33, 1839030562U); /* 35 */ + HH(b, c, d, a, in[14], S34, 4259657740U); /* 36 */ + HH(a, b, c, d, in[1], S31, 2763975236U); /* 37 */ + HH(d, a, b, c, in[4], S32, 1272893353U); /* 38 */ + HH(c, d, a, b, in[7], S33, 4139469664U); /* 39 */ + HH(b, c, d, a, in[10], S34, 3200236656U); /* 40 */ + HH(a, b, c, d, in[13], S31, 681279174U); /* 41 */ + HH(d, a, b, c, in[0], S32, 3936430074U); /* 42 */ + HH(c, d, a, b, in[3], S33, 3572445317U); /* 43 */ + HH(b, c, d, a, in[6], S34, 76029189U); /* 44 */ + HH(a, b, c, d, in[9], S31, 3654602809U); /* 45 */ + HH(d, a, b, c, in[12], S32, 3873151461U); /* 46 */ + HH(c, d, a, b, in[15], S33, 530742520U); /* 47 */ + HH(b, c, d, a, in[2], S34, 3299628645U); /* 48 */ + +/* Round 4 */ +#define S41 6 +#define S42 10 +#define S43 15 +#define S44 21 + + II(a, b, c, d, in[0], S41, 4096336452U); /* 49 */ + II(d, a, b, c, in[7], S42, 1126891415U); /* 50 */ + II(c, d, a, b, in[14], S43, 2878612391U); /* 51 */ + II(b, c, d, a, in[5], S44, 4237533241U); /* 52 */ + II(a, b, c, d, in[12], S41, 1700485571U); /* 53 */ + II(d, a, b, c, in[3], S42, 2399980690U); /* 54 */ + II(c, d, a, b, in[10], S43, 4293915773U); /* 55 */ + II(b, c, d, a, in[1], S44, 2240044497U); /* 56 */ + II(a, b, c, d, in[8], S41, 1873313359U); /* 57 */ + II(d, a, b, c, in[15], S42, 4264355552U); /* 58 */ + II(c, d, a, b, in[6], S43, 2734768916U); /* 59 */ + II(b, c, d, a, in[13], S44, 
1309151649U); /* 60 */ + II(a, b, c, d, in[4], S41, 4149444226U); /* 61 */ + II(d, a, b, c, in[11], S42, 3174756917U); /* 62 */ + II(c, d, a, b, in[2], S43, 718787259U); /* 63 */ + II(b, c, d, a, in[9], S44, 3951481745U); /* 64 */ + + buf[0] += a; + buf[1] += b; + buf[2] += c; + buf[3] += d; +} diff --git a/src/util/src/tmempool.c b/src/util/src/tmempool.c new file mode 100644 index 000000000000..4c8e00c3b588 --- /dev/null +++ b/src/util/src/tmempool.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +#include "tlog.h" +#include "tmempool.h" +#include "tutil.h" + +typedef struct { + int numOfFree; /* number of free slots */ + int first; /* the first free slot */ + int numOfBlock; /* the number of blocks */ + int blockSize; /* block size in bytes */ + int * freeList; /* the index list */ + char * pool; /* the actual mem block */ + pthread_mutex_t mutex; +} pool_t; + +mpool_h taosMemPoolInit(int numOfBlock, int blockSize) { + int i; + pool_t *pool_p; + + if (numOfBlock <= 1 || blockSize <= 1) { + pError("invalid parameter in memPoolInit\n"); + return NULL; + } + + pool_p = (pool_t *)malloc(sizeof(pool_t)); + if (pool_p == NULL) { + pError("mempool malloc failed\n"); + return NULL; + } else { + memset(pool_p, 0, sizeof(pool_t)); + } + + pool_p->blockSize = blockSize; + pool_p->numOfBlock = numOfBlock; + pool_p->pool = (char *)malloc((size_t)(blockSize * numOfBlock)); + pool_p->freeList = (int *)malloc(sizeof(int) * (size_t)numOfBlock); + + if (pool_p->pool == NULL || pool_p->freeList == NULL) { + pError("failed to allocate memory\n"); + free(pool_p->freeList); + free(pool_p->pool); + free(pool_p); + return NULL; + } + + pthread_mutex_init(&(pool_p->mutex), NULL); + + for (i = 0; i < pool_p->numOfBlock; ++i) pool_p->freeList[i] = i; + + pool_p->first = 0; + pool_p->numOfFree = pool_p->numOfBlock; + + return (mpool_h)pool_p; +} + +char *taosMemPoolMalloc(mpool_h handle) { + char * pos = NULL; + pool_t *pool_p = (pool_t *)handle; + + pthread_mutex_lock(&(pool_p->mutex)); + + if (pool_p->numOfFree <= 0) { + pError("mempool: out of memory"); + + } else { + pos = pool_p->pool + pool_p->blockSize * (pool_p->freeList[pool_p->first]); + pool_p->first++; + pool_p->first = pool_p->first % pool_p->numOfBlock; + pool_p->numOfFree--; + } + + pthread_mutex_unlock(&(pool_p->mutex)); + if (pos != NULL) memset(pos, 0, (size_t)pool_p->blockSize); + return pos; +} + +void taosMemPoolFree(mpool_h handle, char *pMem) { + int index; + pool_t *pool_p = (pool_t *)handle; + + if (pMem == NULL) return; + + pthread_mutex_lock(&pool_p->mutex); + + index = (int)(pMem - pool_p->pool) % pool_p->blockSize; + + if (index != 0) { + pError("invalid free address:%p\n", pMem); + } else { + index = (int)((pMem - pool_p->pool) / pool_p->blockSize); + + if (index < 0 || index >= pool_p->numOfBlock) { + pError("mempool: error, invalid address:%p\n", pMem); + } else { + pool_p->freeList[(pool_p->first + pool_p->numOfFree) % 
pool_p->numOfBlock] = index; + pool_p->numOfFree++; + memset(pMem, 0, (size_t)pool_p->blockSize); + } + } + + pthread_mutex_unlock(&pool_p->mutex); +} + +void taosMemPoolCleanUp(mpool_h handle) { + pool_t *pool_p = (pool_t *)handle; + + pthread_mutex_destroy(&pool_p->mutex); + if (pool_p->pool) free(pool_p->pool); + if (pool_p->freeList) free(pool_p->freeList); + memset(&pool_p, 0, sizeof(pool_p)); + free(pool_p); +} diff --git a/src/util/src/tmodule.c b/src/util/src/tmodule.c new file mode 100644 index 000000000000..ef4495af701f --- /dev/null +++ b/src/util/src/tmodule.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +#include "tmodule.h" +#include "tutil.h" + +void *taosProcessQueue(void *param); + +char *taosDisplayModuleStatus(int moduleNum) { + static char status[256]; + int i; + + status[0] = 0; + + for (i = 1; i < moduleNum; ++i) + if (taosCheckPthreadValid(moduleObj[i].thread)) sprintf(status + strlen(status), "%s ", moduleObj[i].name); + + if (status[0] == 0) + sprintf(status, "all module is down"); + else + sprintf(status, " is(are) up"); + + return status; +} + +int taosInitModule(module_t *pMod) { + pthread_attr_t attr; + + if (pthread_mutex_init(&pMod->queueMutex, NULL) < 0) { + printf("ERROR: init %s queueMutex failed, reason:%s\n", pMod->name, strerror(errno)); + taosCleanUpModule(pMod); + return -1; + } + + if (pthread_mutex_init(&pMod->stmMutex, NULL) < 0) { + printf("ERROR: init %s stmMutex failed, reason:%s\n", pMod->name, strerror(errno)); + taosCleanUpModule(pMod); + return -1; + } + + if (sem_init(&pMod->emptySem, 0, (unsigned int)pMod->queueSize) != 0) { + printf("ERROR: init %s empty semaphore failed, reason:%s\n", pMod->name, strerror(errno)); + taosCleanUpModule(pMod); + return -1; + } + + if (sem_init(&pMod->fullSem, 0, 0) != 0) { + printf("ERROR: init %s full semaphore failed, reason:%s\n", pMod->name, strerror(errno)); + taosCleanUpModule(pMod); + return -1; + } + + if ((pMod->queue = (msg_t *)malloc((size_t)pMod->queueSize * sizeof(msg_t))) == NULL) { + printf("ERROR: %s no enough memory, reason:%s\n", pMod->name, strerror(errno)); + taosCleanUpModule(pMod); + return -1; + } + + memset(pMod->queue, 0, (size_t)pMod->queueSize * sizeof(msg_t)); + pMod->fullSlot = 0; + pMod->emptySlot = 0; + + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&pMod->thread, &attr, taosProcessQueue, (void *)pMod) != 0) { + printf("ERROR: %s failed to create thread, reason:%s\n", pMod->name, strerror(errno)); + taosCleanUpModule(pMod); + return -1; + } + + if (pMod->init) return (*(pMod->init))(); + + return 0; +} + +void *taosProcessQueue(void *param) { + msg_t msg; + module_t *pMod = (module_t *)param; + int oldType; + + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldType); + + signal(SIGINT, SIG_IGN); + + while (1) { + if (sem_wait(&pMod->fullSem) != 0) + printf("ERROR: wait %s fullSem failed, 
reason:%s\n", pMod->name, strerror(errno)); + + if (pthread_mutex_lock(&pMod->queueMutex) != 0) + printf("ERROR: lock %s queueMutex failed, reason:%s\n", pMod->name, strerror(errno)); + + msg = pMod->queue[pMod->fullSlot]; + memset(&(pMod->queue[pMod->fullSlot]), 0, sizeof(msg_t)); + pMod->fullSlot = (pMod->fullSlot + 1) % pMod->queueSize; + + if (pthread_mutex_unlock(&pMod->queueMutex) != 0) + printf("ERROR: unlock %s queueMutex failed, reason:%s\n", pMod->name, strerror(errno)); + + if (sem_post(&pMod->emptySem) != 0) + printf("ERROR: post %s emptySem failed, reason:%s\n", pMod->name, strerror(errno)); + + /* process the message */ + if (msg.cid < 0 || msg.cid >= maxCid) { + /*printf("ERROR: cid:%d is out of range, msg is discarded\n", msg.cid);*/ + continue; + } + + /* + if ( pthread_mutex_lock ( &(pMod->stmMutex)) != 0 ) + printf("ERROR: lock %s stmMutex failed, reason:%s\n", pMod->name, + strerror(errno)); + */ + (*(pMod->processMsg))(&msg); + + tfree(msg.msg); + /* + if ( pthread_mutex_unlock ( &(pMod->stmMutex)) != 0 ) + printf("ERROR: unlock %s stmMutex failed, reason:%s\n", pMod->name, + strerror(errno)); + */ + } +} + +int taosSendMsgToModule(module_t *pMod, int cid, int mid, int tid, char *msg) { + if (sem_wait(&pMod->emptySem) != 0) + printf("ERROR: wait %s emptySem failed, reason:%s\n", pMod->name, strerror(errno)); + + if (pthread_mutex_lock(&pMod->queueMutex) != 0) + printf("ERROR: lock %s queueMutex failed, reason:%s\n", pMod->name, strerror(errno)); + + pMod->queue[pMod->emptySlot].cid = cid; + pMod->queue[pMod->emptySlot].mid = mid; + pMod->queue[pMod->emptySlot].tid = tid; + pMod->queue[pMod->emptySlot].msg = msg; + pMod->emptySlot = (pMod->emptySlot + 1) % pMod->queueSize; + + if (pthread_mutex_unlock(&pMod->queueMutex) != 0) + printf("ERROR: unlock %s queueMutex failed, reason:%s\n", pMod->name, strerror(errno)); + + if (sem_post(&pMod->fullSem) != 0) printf("ERROR: post %s fullSem failed, reason:%s\n", pMod->name, strerror(errno)); + + return 0; +} + +void taosCleanUpModule(module_t *pMod) { + int i; + + if (pMod->cleanUp) pMod->cleanUp(); + + if (taosCheckPthreadValid(pMod->thread)) { + pthread_cancel(pMod->thread); + pthread_join(pMod->thread, NULL); + } + + taosResetPthread(&pMod->thread); + sem_destroy(&pMod->emptySem); + sem_destroy(&pMod->fullSem); + pthread_mutex_destroy(&pMod->queueMutex); + pthread_mutex_destroy(&pMod->stmMutex); + + for (i = 0; i < pMod->queueSize; ++i) { + tfree(pMod->queue[i].msg); + } + + tfree(pMod->queue); + + memset(pMod, 0, sizeof(module_t)); +} diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c new file mode 100644 index 000000000000..bf2d07dd59ab --- /dev/null +++ b/src/util/src/tsched.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tlog.h" +#include "tsched.h" + +typedef struct { + char label[16]; + sem_t emptySem; + sem_t fullSem; + pthread_mutex_t queueMutex; + int fullSlot; + int emptySlot; + int queueSize; + int numOfThreads; + pthread_t * qthread; + SSchedMsg * queue; +} SSchedQueue; + +void (*taosSchedFp[128])(SSchedMsg *msg) = {0}; +void *taosProcessSchedQueue(void *param); +void taosCleanUpScheduler(void *param); + +void *taosInitScheduler(int queueSize, int numOfThreads, char *label) { + pthread_attr_t attr; + SSchedQueue * pSched = (SSchedQueue *)malloc(sizeof(SSchedQueue)); + + memset(pSched, 0, sizeof(SSchedQueue)); + pSched->queueSize = queueSize; + pSched->numOfThreads = numOfThreads; + strcpy(pSched->label, label); + + if (pthread_mutex_init(&pSched->queueMutex, NULL) < 0) { + pError("init %s:queueMutex failed, reason:%s", pSched->label, strerror(errno)); + goto _error; + } + + if (sem_init(&pSched->emptySem, 0, (unsigned int)pSched->queueSize) != 0) { + pError("init %s:empty semaphore failed, reason:%s", pSched->label, strerror(errno)); + goto _error; + } + + if (sem_init(&pSched->fullSem, 0, 0) != 0) { + pError("init %s:full semaphore failed, reason:%s", pSched->label, strerror(errno)); + goto _error; + } + + if ((pSched->queue = (SSchedMsg *)malloc((size_t)pSched->queueSize * sizeof(SSchedMsg))) == NULL) { + pError("%s: no enough memory for queue, reason:%s", pSched->label, strerror(errno)); + goto _error; + } + + memset(pSched->queue, 0, (size_t)pSched->queueSize * sizeof(SSchedMsg)); + pSched->fullSlot = 0; + pSched->emptySlot = 0; + + pSched->qthread = malloc(sizeof(pthread_t) * (size_t)pSched->numOfThreads); + + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + + for (int i = 0; i < pSched->numOfThreads; ++i) { + if (pthread_create(pSched->qthread + i, &attr, taosProcessSchedQueue, (void *)pSched) != 0) { + pError("%s: failed to create rpc thread, reason:%s", pSched->label, strerror(errno)); + goto _error; + } + } + + pTrace("%s scheduler is initialized, numOfThreads:%d", pSched->label, pSched->numOfThreads); + + return (void *)pSched; + +_error: + taosCleanUpScheduler(pSched); + return NULL; +} + +void *taosProcessSchedQueue(void *param) { + SSchedMsg msg; + SSchedQueue *pSched = (SSchedQueue *)param; + + while (1) { + if (sem_wait(&pSched->fullSem) != 0) { + pError("wait %s fullSem failed, errno:%d, reason:%s", pSched->label, errno, strerror(errno)); + if (errno == EINTR) { + /* sem_wait is interrupted by interrupt, ignore and continue */ + continue; + } + } + + if (pthread_mutex_lock(&pSched->queueMutex) != 0) + pError("lock %s queueMutex failed, reason:%s", pSched->label, strerror(errno)); + + msg = pSched->queue[pSched->fullSlot]; + memset(pSched->queue + pSched->fullSlot, 0, sizeof(SSchedMsg)); + pSched->fullSlot = (pSched->fullSlot + 1) % pSched->queueSize; + + if (pthread_mutex_unlock(&pSched->queueMutex) != 0) + pError("unlock %s queueMutex failed, reason:%s\n", pSched->label, strerror(errno)); + + if (sem_post(&pSched->emptySem) != 0) + pError("post %s emptySem failed, reason:%s\n", pSched->label, strerror(errno)); + + if (msg.fp) + (*(msg.fp))(&msg); + else if (msg.tfp) + (*(msg.tfp))(msg.ahandle, msg.thandle); + } +} + +int taosScheduleTask(void *qhandle, SSchedMsg *pMsg) { + SSchedQueue *pSched = (SSchedQueue *)qhandle; + if (pSched == NULL) { + pError("sched is not ready, msg:%p is dropped", pMsg); + return 0; + } + + if 
(sem_wait(&pSched->emptySem) != 0) pError("wait %s emptySem failed, reason:%s", pSched->label, strerror(errno)); + + if (pthread_mutex_lock(&pSched->queueMutex) != 0) + pError("lock %s queueMutex failed, reason:%s", pSched->label, strerror(errno)); + + pSched->queue[pSched->emptySlot] = *pMsg; + pSched->emptySlot = (pSched->emptySlot + 1) % pSched->queueSize; + + if (pthread_mutex_unlock(&pSched->queueMutex) != 0) + pError("unlock %s queueMutex failed, reason:%s", pSched->label, strerror(errno)); + + if (sem_post(&pSched->fullSem) != 0) pError("post %s fullSem failed, reason:%s", pSched->label, strerror(errno)); + + return 0; +} + +void taosCleanUpScheduler(void *param) { + SSchedQueue *pSched = (SSchedQueue *)param; + if (pSched == NULL) return; + + for (int i = 0; i < pSched->numOfThreads; ++i) { + pthread_cancel(pSched->qthread[i]); + pthread_join(pSched->qthread[i], NULL); + } + + sem_destroy(&pSched->emptySem); + sem_destroy(&pSched->fullSem); + pthread_mutex_destroy(&pSched->queueMutex); + + free(pSched->queue); + free(pSched->qthread); +} diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c new file mode 100644 index 000000000000..c26c39982cea --- /dev/null +++ b/src/util/src/tskiplist.c @@ -0,0 +1,911 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#include "tlog.h" +#include "tsdb.h" +#include "tskiplist.h" +#include "tutil.h" + +static uint32_t doGetRand(SRandom *pRand, int32_t n) { + const uint32_t val = 2147483647L; + uint64_t p = pRand->s * 16807; + + pRand->s = (uint32_t)((p >> 31) + (p & val)); + if (pRand->s > val) { + pRand->s -= val; + } + + return (pRand->s % n); +} + +static SRandom getRand(uint32_t s) { + uint32_t seed = s & 0x7FFFFFFF; + if (seed == 0 || seed == INT32_MAX) { + seed = 1; + } + + struct SRandom r = {seed, doGetRand}; + return r; +} + +void recordNodeEachLevel(tSkipList *pSkipList, int32_t nLevel); + +int32_t getSkipListNodeLevel(tSkipList *pSkipList); + +void tSkipListDoInsert(tSkipList *pSkipList, tSkipListNode **forward, int32_t nLevel, tSkipListNode *pNode); + +static int32_t getSkipListNodeRandomHeight(tSkipList *pSkipList) { + const uint32_t factor = 4; + + int32_t n = 1; + while ((pSkipList->r.rand(&pSkipList->r, MAX_SKIP_LIST_LEVEL) % factor) == 0 && n <= MAX_SKIP_LIST_LEVEL) { + n++; + } + + return n; +} + +void tSkipListDoRecordPutNode(tSkipList *pSkipList) { + const int32_t MAX_RECORD_NUM = 1000; + + if (pSkipList->state.nInsertObjs == MAX_RECORD_NUM) { + pSkipList->state.nInsertObjs = 1; + pSkipList->state.nTotalStepsForInsert = 0; + pSkipList->state.nTotalElapsedTimeForInsert = 0; + } else { + pSkipList->state.nInsertObjs++; + } +} + +int32_t compareIntVal(const void *pLeft, const void *pRight) { + int64_t lhs = ((tSkipListKey *)pLeft)->i64Key; + int64_t rhs = ((tSkipListKey *)pRight)->i64Key; + + DEFAULT_COMP(lhs, rhs); +} + +int32_t compareIntDoubleVal(const void *pLeft, const void *pRight) { + int64_t lhs = ((tSkipListKey *)pLeft)->i64Key; + double rhs = ((tSkipListKey *)pRight)->dKey; + if (fabs(lhs - rhs) < FLT_EPSILON) { + return 0; + } else { + return (lhs > rhs) ? 1 : -1; + } +} + +int32_t compareDoubleIntVal(const void *pLeft, const void *pRight) { + double lhs = ((tSkipListKey *)pLeft)->dKey; + int64_t rhs = ((tSkipListKey *)pRight)->i64Key; + if (fabs(lhs - rhs) < FLT_EPSILON) { + return 0; + } else { + return (lhs > rhs) ? 1 : -1; + } +} + +int32_t compareDoubleVal(const void *pLeft, const void *pRight) { + double ret = (((tSkipListKey *)pLeft)->dKey - ((tSkipListKey *)pRight)->dKey); + if (fabs(ret) < FLT_EPSILON) { + return 0; + } else { + return ret > 0 ? 1 : -1; + } +} + +int32_t compareStrVal(const void *pLeft, const void *pRight) { + tSkipListKey *pL = (tSkipListKey *)pLeft; + tSkipListKey *pR = (tSkipListKey *)pRight; + + if (pL->nLen == 0 && pR->nLen == 0) { + return 0; + } + + /* + * handle only one-side bound compare situation, there is only lower bound or + * only + * upper bound + */ + if (pL->nLen == -1) { + return 1; // no lower bound, lower bound is minimum, always return -1; + } else if (pR->nLen == -1) { + return -1; // no upper bound, upper bound is maximum situation, always + // return 1; + } + + int32_t ret = strcmp(((tSkipListKey *)pLeft)->pz, ((tSkipListKey *)pRight)->pz); + + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 
1 : -1; + } +} + +int32_t compareWStrVal(const void *pLeft, const void *pRight) { + tSkipListKey *pL = (tSkipListKey *)pLeft; + tSkipListKey *pR = (tSkipListKey *)pRight; + + if (pL->nLen == 0 && pR->nLen == 0) { + return 0; + } + + /* + * handle only one-side bound compare situation, + * there is only lower bound or only upper bound + */ + if (pL->nLen == -1) { + return 1; // no lower bound, lower bound is minimum, always return -1; + } else if (pR->nLen == -1) { + return -1; // no upper bound, upper bound is maximum situation, always + // return 1; + } + + int32_t ret = wcscmp(((tSkipListKey *)pLeft)->wpz, ((tSkipListKey *)pRight)->wpz); + + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 1 : -1; + } +} + +static __compar_fn_t getKeyFilterComparator(tSkipList *pSkipList, int32_t filterDataType) { + __compar_fn_t comparator = NULL; + + switch (pSkipList->keyType) { + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_BOOL: { + if (filterDataType >= TSDB_DATA_TYPE_BOOL && filterDataType <= TSDB_DATA_TYPE_BIGINT) { + comparator = compareIntVal; + } else if (filterDataType >= TSDB_DATA_TYPE_FLOAT && filterDataType <= TSDB_DATA_TYPE_DOUBLE) { + comparator = compareIntDoubleVal; + } + break; + } + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + if (filterDataType >= TSDB_DATA_TYPE_BOOL && filterDataType <= TSDB_DATA_TYPE_BIGINT) { + comparator = compareDoubleIntVal; + } else if (filterDataType >= TSDB_DATA_TYPE_FLOAT && filterDataType <= TSDB_DATA_TYPE_DOUBLE) { + comparator = compareDoubleVal; + } + break; + } + case TSDB_DATA_TYPE_BINARY: + comparator = compareStrVal; + break; + case TSDB_DATA_TYPE_NCHAR: + comparator = compareWStrVal; + break; + default: + comparator = compareIntVal; + break; + } + + return comparator; +} + +static __compar_fn_t getKeyComparator(int32_t keyType) { + __compar_fn_t comparator = NULL; + + switch (keyType) { + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_BOOL: + comparator = compareIntVal; + break; + + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + comparator = compareDoubleVal; + break; + + case TSDB_DATA_TYPE_BINARY: + comparator = compareStrVal; + break; + + case TSDB_DATA_TYPE_NCHAR: + comparator = compareWStrVal; + break; + + default: + comparator = compareIntVal; + break; + } + + return comparator; +} + +int32_t tSkipListCreate(tSkipList **pSkipList, int16_t nMaxLevel, int16_t keyType, int16_t nMaxKeyLen, + int32_t (*funcp)()) { + (*pSkipList) = (tSkipList *)calloc(1, sizeof(tSkipList)); + if ((*pSkipList) == NULL) { + return -1; + } + + (*pSkipList)->keyType = keyType; + + (*pSkipList)->comparator = getKeyComparator(keyType); + (*pSkipList)->pHead.pForward = (tSkipListNode **)calloc(1, POINTER_BYTES * MAX_SKIP_LIST_LEVEL); + + (*pSkipList)->nMaxLevel = MAX_SKIP_LIST_LEVEL; + (*pSkipList)->nLevel = 1; + + (*pSkipList)->nMaxKeyLen = nMaxKeyLen; + (*pSkipList)->nMaxLevel = nMaxLevel; + + if (pthread_rwlock_init(&(*pSkipList)->lock, NULL) != 0) { + return -1; + } + + srand(time(NULL)); + (*pSkipList)->r = getRand(time(NULL)); + + (*pSkipList)->state.nTotalMemSize += sizeof(tSkipList); + return 0; +} + +static void doRemove(tSkipList *pSkipList, tSkipListNode *pNode, tSkipListNode *forward[]) { + int32_t level = pNode->nLevel; + for (int32_t j = level - 1; j >= 0; --j) { + if ((forward[j]->pForward[j] != NULL) && 
(forward[j]->pForward[j]->pForward[j])) { + forward[j]->pForward[j]->pForward[j]->pBackward[j] = forward[j]; + } + + if (forward[j]->pForward[j] != NULL) { + forward[j]->pForward[j] = forward[j]->pForward[j]->pForward[j]; + } + } + + pSkipList->state.nTotalMemSize -= (sizeof(tSkipListNode) + POINTER_BYTES * pNode->nLevel * 2); + removeNodeEachLevel(pSkipList, pNode->nLevel); + + tfree(pNode); + --pSkipList->nSize; +} + +static size_t getOneNodeSize(const tSkipListKey *pKey, int32_t nLevel) { + size_t size = sizeof(tSkipListNode) + sizeof(intptr_t) * (nLevel << 1); + if (pKey->nType == TSDB_DATA_TYPE_BINARY) { + size += pKey->nLen + 1; + } else if (pKey->nType == TSDB_DATA_TYPE_NCHAR) { + size += (pKey->nLen + 1) * TSDB_NCHAR_SIZE; + } + + return size; +} + +static tSkipListNode *tSkipListCreateNode(void *pData, const tSkipListKey *pKey, int32_t nLevel) { + size_t nodeSize = getOneNodeSize(pKey, nLevel); + tSkipListNode *pNode = (tSkipListNode *)calloc(1, nodeSize); + + pNode->pForward = (tSkipListNode **)(&pNode[1]); + pNode->pBackward = (pNode->pForward + nLevel); + + pNode->pData = pData; + + pNode->key = *pKey; + if (pKey->nType == TSDB_DATA_TYPE_BINARY) { + pNode->key.pz = (char *)(pNode->pBackward + nLevel); + + strcpy(pNode->key.pz, pKey->pz); + pNode->key.pz[pKey->nLen] = 0; + } else if (pKey->nType == TSDB_DATA_TYPE_NCHAR) { + pNode->key.wpz = (wchar_t *)(pNode->pBackward + nLevel); + wcsncpy(pNode->key.wpz, pKey->wpz, pKey->nLen); + pNode->key.wpz[pKey->nLen] = 0; + } + + pNode->nLevel = nLevel; + return pNode; +} + +tSkipListKey tSkipListCreateKey(int32_t type, char *val, size_t keyLength) { + tSkipListKey k; + k.nType = (uint8_t)type; + + switch (type) { + case TSDB_DATA_TYPE_INT: { + k.i64Key = *(int32_t *)val; + return k; + } + case TSDB_DATA_TYPE_BIGINT: { + k.i64Key = *(int64_t *)val; + return k; + } + case TSDB_DATA_TYPE_DOUBLE: { + k.dKey = *(double *)val; + return k; + } + case TSDB_DATA_TYPE_FLOAT: { + k.dKey = *(float *)val; + return k; + } + case TSDB_DATA_TYPE_SMALLINT: { + k.i64Key = *(int16_t *)val; + return k; + } + case TSDB_DATA_TYPE_TINYINT: { + k.i64Key = *(int8_t *)val; + return k; + } + case TSDB_DATA_TYPE_BOOL: { + k.i64Key = *(int8_t *)val; + return k; + } + case TSDB_DATA_TYPE_BINARY: { + k.pz = malloc(keyLength + 1); + k.nLen = keyLength; + memcpy(k.pz, val, keyLength); + k.pz[keyLength] = 0; + return k; + } + case TSDB_DATA_TYPE_NCHAR: { + k.pz = malloc(keyLength + TSDB_NCHAR_SIZE); + k.nLen = keyLength / TSDB_NCHAR_SIZE; + + wcsncpy(k.wpz, (wchar_t *)val, k.nLen); + k.wpz[k.nLen] = 0; + + return k; + } + default: + return k; + } +} + +void tSkipListDestroyKey(tSkipListKey *pKey) { tVariantDestroy(pKey); } + +void tSkipListDestroy(tSkipList **pSkipList) { + if ((*pSkipList) == NULL) { + return; + } + + pthread_rwlock_wrlock(&(*pSkipList)->lock); + tSkipListNode *pNode = (*pSkipList)->pHead.pForward[0]; + while (pNode) { + tSkipListNode *pTemp = pNode; + pNode = pNode->pForward[0]; + tfree(pTemp); + } + + tfree((*pSkipList)->pHead.pForward); + pthread_rwlock_unlock(&(*pSkipList)->lock); + tfree(*pSkipList); +} + +tSkipListNode *tSkipListPut(tSkipList *pSkipList, void *pData, tSkipListKey *pKey, int32_t insertIdenticalKey) { + if (pSkipList == NULL) { + return NULL; + } + + pthread_rwlock_wrlock(&pSkipList->lock); + + // record one node is put into skiplist + tSkipListDoRecordPutNode(pSkipList); + + tSkipListNode *px = &pSkipList->pHead; + + tSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; + for (int32_t i = pSkipList->nLevel - 1; i >= 0; --i) { + while 
(px->pForward[i] != NULL && (pSkipList->comparator(&px->pForward[i]->key, pKey) < 0)) { + px = px->pForward[i]; + } + + pSkipList->state.nTotalStepsForInsert++; + forward[i] = px; + } + + if ((insertIdenticalKey == 0) && forward[0] != &pSkipList->pHead && + (pSkipList->comparator(&forward[0]->key, pKey) == 0)) { + /* ignore identical key*/ + pthread_rwlock_unlock(&pSkipList->lock); + return forward[0]; + } + + int32_t nLevel = getSkipListNodeLevel(pSkipList); + recordNodeEachLevel(pSkipList, nLevel); + + tSkipListNode *pNode = tSkipListCreateNode(pData, pKey, nLevel); + tSkipListDoInsert(pSkipList, forward, nLevel, pNode); + + pSkipList->nSize += 1; + + // char tmpstr[512] = {0}; + // tVariantToString(&pNode->key, tmpstr); + // pTrace("skiplist:%p, node added, key:%s, total list len:%d", pSkipList, + // tmpstr, pSkipList->nSize); + + pSkipList->state.nTotalMemSize += getOneNodeSize(pKey, nLevel); + pthread_rwlock_unlock(&pSkipList->lock); + + return pNode; +} + +void tSkipListDoInsert(tSkipList *pSkipList, tSkipListNode **forward, int32_t nLevel, tSkipListNode *pNode) { + for (int32_t i = 0; i < nLevel; ++i) { + tSkipListNode *x = forward[i]; + if (x != NULL) { + pNode->pBackward[i] = x; + if (x->pForward[i]) x->pForward[i]->pBackward[i] = pNode; + + pNode->pForward[i] = x->pForward[i]; + x->pForward[i] = pNode; + } else { + pSkipList->pHead.pForward[i] = pNode; + pNode->pBackward[i] = &(pSkipList->pHead); + } + } +} + +int32_t getSkipListNodeLevel(tSkipList *pSkipList) { + int32_t nLevel = getSkipListNodeRandomHeight(pSkipList); + if (pSkipList->nSize == 0) { + nLevel = 1; + pSkipList->nLevel = 1; + } else { + if (nLevel > pSkipList->nLevel && pSkipList->nLevel < pSkipList->nMaxLevel) { + nLevel = (++pSkipList->nLevel); + } + } + return nLevel; +} + +void recordNodeEachLevel(tSkipList *pSkipList, int32_t nLevel) { // record link count in each level + for (int32_t i = 0; i < nLevel; ++i) { + pSkipList->state.nLevelNodeCnt[i]++; + } +} + +void removeNodeEachLevel(tSkipList *pSkipList, int32_t nLevel) { + for (int32_t i = 0; i < nLevel; ++i) { + pSkipList->state.nLevelNodeCnt[i]--; + } +} + +tSkipListNode *tSkipListGetOne(tSkipList *pSkipList, tSkipListKey *pKey) { + int32_t sLevel = pSkipList->nLevel - 1; + int32_t ret = -1; + + tSkipListNode *x = &pSkipList->pHead; + + pthread_rwlock_rdlock(&pSkipList->lock); + pSkipList->state.queryCount++; + + __compar_fn_t filterComparator = getKeyFilterComparator(pSkipList, pKey->nType); + + for (int32_t i = sLevel; i >= 0; --i) { + while (x->pForward[i] != NULL && (ret = filterComparator(&x->pForward[i]->key, pKey)) < 0) { + x = x->pForward[i]; + } + + if (ret == 0) { + pthread_rwlock_unlock(&pSkipList->lock); + return x->pForward[i]; + } + } + + pthread_rwlock_unlock(&pSkipList->lock); + return NULL; +} + +static int32_t tSkipListEndParQuery(tSkipList *pSkipList, tSkipListNode *pStartNode, tSkipListKey *pEndKey, + int32_t cond, tSkipListNode ***pRes) { + pthread_rwlock_rdlock(&pSkipList->lock); + tSkipListNode *p = pStartNode; + int32_t numOfRes = 0; + + __compar_fn_t filterComparator = getKeyFilterComparator(pSkipList, pEndKey->nType); + while (p != NULL) { + int32_t ret = filterComparator(&p->key, pEndKey); + if (ret > 0) { + break; + } + + if (ret < 0) { + numOfRes++; + p = p->pForward[0]; + } else if (ret == 0) { + if (cond == TSDB_RELATION_LESS_EQUAL) { + numOfRes++; + p = p->pForward[0]; + } else { + break; + } + } + } + + (*pRes) = (tSkipListNode **)malloc(POINTER_BYTES * numOfRes); + for (int32_t i = 0; i < numOfRes; ++i) { + (*pRes)[i] = 
pStartNode; + pStartNode = pStartNode->pForward[0]; + } + pthread_rwlock_unlock(&pSkipList->lock); + + return numOfRes; +} + +/* + * maybe return the copy of tSkipListNode would be better + */ +int32_t tSkipListGets(tSkipList *pSkipList, tSkipListKey *pKey, tSkipListNode ***pRes) { + (*pRes) = NULL; + + tSkipListNode *pNode = tSkipListGetOne(pSkipList, pKey); + if (pNode == NULL) { + return 0; + } + + __compar_fn_t filterComparator = getKeyFilterComparator(pSkipList, pKey->nType); + + // backward check if previous nodes are with the same value. + tSkipListNode *pPrev = pNode->pBackward[0]; + while ((pPrev != &pSkipList->pHead) && filterComparator(&pPrev->key, pKey) == 0) { + pPrev = pPrev->pBackward[0]; + } + + return tSkipListEndParQuery(pSkipList, pPrev->pForward[0], &pNode->key, TSDB_RELATION_LESS_EQUAL, pRes); +} + +static tSkipListNode *tSkipListParQuery(tSkipList *pSkipList, tSkipListKey *pKey, int32_t cond) { + int32_t sLevel = pSkipList->nLevel - 1; + int32_t ret = -1; + + tSkipListNode *x = &pSkipList->pHead; + __compar_fn_t filterComparator = getKeyFilterComparator(pSkipList, pKey->nType); + + pthread_rwlock_rdlock(&pSkipList->lock); + + if (cond == TSDB_RELATION_LARGE_EQUAL || cond == TSDB_RELATION_LARGE) { + for (int32_t i = sLevel; i >= 0; --i) { + while (x->pForward[i] != NULL && (ret = filterComparator(&x->pForward[i]->key, pKey)) < 0) { + x = x->pForward[i]; + } + } + + // backward check if previous nodes are with the same value. + if (cond == TSDB_RELATION_LARGE_EQUAL && ret == 0) { + tSkipListNode *pNode = x->pForward[0]; + while ((pNode->pBackward[0] != &pSkipList->pHead) && (filterComparator(&pNode->pBackward[0]->key, pKey) == 0)) { + pNode = pNode->pBackward[0]; + } + pthread_rwlock_unlock(&pSkipList->lock); + return pNode; + } + + if (ret > 0 || cond == TSDB_RELATION_LARGE_EQUAL) { + pthread_rwlock_unlock(&pSkipList->lock); + return x->pForward[0]; + } else { // cond == TSDB_RELATION_LARGE && ret == 0 + tSkipListNode *pn = x->pForward[0]; + while (pn != NULL && filterComparator(&pn->key, pKey) == 0) { + pn = pn->pForward[0]; + } + pthread_rwlock_unlock(&pSkipList->lock); + return pn; + } + } + + pthread_rwlock_unlock(&pSkipList->lock); + return NULL; +} + +int32_t tSkipListIterateList(tSkipList *pSkipList, tSkipListNode ***pRes, bool (*fp)(tSkipListNode *, void *), + void *param) { + pthread_rwlock_rdlock(&pSkipList->lock); + + (*pRes) = (tSkipListNode **)malloc(POINTER_BYTES * pSkipList->nSize); + tSkipListNode *pStartNode = pSkipList->pHead.pForward[0]; + int32_t num = 0; + for (int32_t i = 0; i < pSkipList->nSize; ++i) { + if (pStartNode == NULL) { + pError("error skiplist %p, required length:%d, actual length:%d", pSkipList, pSkipList->nSize, i - 1); +#ifdef _DEBUG_VIEW + tSkipListPrint(pSkipList, 1); +#endif + break; + } + + if (fp == NULL || (fp != NULL && fp(pStartNode, param) == true)) { + (*pRes)[num++] = pStartNode; + } + + pStartNode = pStartNode->pForward[0]; + } + pthread_rwlock_unlock(&pSkipList->lock); + return num; +} + +int32_t tSkipListRangeQuery(tSkipList *pSkipList, tSKipListQueryCond *pCond, tSkipListNode ***pRes) { + pSkipList->state.queryCount++; + tSkipListNode *pStart = tSkipListParQuery(pSkipList, &pCond->lowerBnd, pCond->lowerBndRelOptr); + if (pStart == 0) { + *pRes = NULL; + return 0; + } + + return tSkipListEndParQuery(pSkipList, pStart, &pCond->upperBnd, pCond->upperBndRelOptr, pRes); +} + +static bool removeSupport(tSkipList *pSkipList, tSkipListNode **forward, tSkipListKey *pKey) { + __compar_fn_t filterComparator = 
getKeyFilterComparator(pSkipList, pKey->nType); + + if (filterComparator(&forward[0]->pForward[0]->key, pKey) == 0) { + tSkipListNode *p = forward[0]->pForward[0]; + doRemove(pSkipList, p, forward); + } else { // failed to find the node of specified value,abort + return false; + } + + // compress the minimum level of skip list + while (pSkipList->nLevel > 0 && pSkipList->pHead.pForward[pSkipList->nLevel - 1] == NULL) { + pSkipList->nLevel -= 1; + } + + return true; +} + +void tSkipListRemoveNode(tSkipList *pSkipList, tSkipListNode *pNode) { + tSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; + + pthread_rwlock_rdlock(&pSkipList->lock); + for (int32_t i = 0; i < pNode->nLevel; ++i) { + forward[i] = pNode->pBackward[i]; + } + + removeSupport(pSkipList, forward, &pNode->key); + pthread_rwlock_unlock(&pSkipList->lock); +} + +bool tSkipListRemove(tSkipList *pSkipList, tSkipListKey *pKey) { + tSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; + __compar_fn_t filterComparator = getKeyFilterComparator(pSkipList, pKey->nType); + + pthread_rwlock_rdlock(&pSkipList->lock); + + tSkipListNode *x = &pSkipList->pHead; + for (int32_t i = pSkipList->nLevel - 1; i >= 0; --i) { + while (x->pForward[i] != NULL && (filterComparator(&x->pForward[i]->key, pKey) < 0)) { + x = x->pForward[i]; + } + forward[i] = x; + } + + bool ret = removeSupport(pSkipList, forward, pKey); + pthread_rwlock_unlock(&pSkipList->lock); + + return ret; +} + +void tSkipListPrint(tSkipList *pSkipList, int16_t nlevel) { + if (pSkipList == NULL || pSkipList->nLevel < nlevel || nlevel <= 0) { + return; + } + + tSkipListNode *p = pSkipList->pHead.pForward[nlevel - 1]; + int32_t id = 1; + while (p) { + switch (pSkipList->keyType) { + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_BIGINT: + fprintf(stdout, "%d: %ld \n", id++, p->key.i64Key); + break; + case TSDB_DATA_TYPE_BINARY: + fprintf(stdout, "%d: %s \n", id++, p->key.pz); + break; + case TSDB_DATA_TYPE_DOUBLE: + fprintf(stdout, "%d: %lf \n", id++, p->key.dKey); + break; + default: + fprintf(stdout, "\n"); + } + p = p->pForward[nlevel - 1]; + } +} + +/* + * query processor based on query condition + */ +int32_t tSkipListQuery(tSkipList *pSkipList, tSKipListQueryCond *pQueryCond, tSkipListNode ***pResult) { + // query condition check + int32_t rel = 0; + __compar_fn_t comparator = getKeyComparator(pQueryCond->lowerBnd.nType); + + if (pSkipList == NULL || pQueryCond == NULL || pSkipList->nSize == 0 || + (((rel = comparator(&pQueryCond->lowerBnd, &pQueryCond->upperBnd)) > 0 && + pQueryCond->lowerBnd.nType != TSDB_DATA_TYPE_NCHAR && pQueryCond->lowerBnd.nType != TSDB_DATA_TYPE_BINARY))) { + (*pResult) = NULL; + return 0; + } + + if (rel == 0) { + /* + * 0 means: pQueryCond->lowerBnd == pQueryCond->upperBnd + * point query + */ + if (pQueryCond->lowerBndRelOptr == TSDB_RELATION_LARGE_EQUAL && + pQueryCond->upperBndRelOptr == TSDB_RELATION_LESS_EQUAL) { // point query + return tSkipListGets(pSkipList, &pQueryCond->lowerBnd, pResult); + } else { + (*pResult) = NULL; + return 0; + } + } else { + /* range query, query operation code check */ + return tSkipListRangeQuery(pSkipList, pQueryCond, pResult); + } +} + +typedef struct MultipleQueryResult { + int32_t len; + tSkipListNode **pData; +} MultipleQueryResult; + +static int32_t mergeQueryResult(MultipleQueryResult *pResults, int32_t numOfResSet, tSkipListNode ***pRes) { + int32_t total = 0; + for (int32_t i = 0; i < numOfResSet; ++i) { + total += pResults[i].len; + } + + (*pRes) = 
malloc(POINTER_BYTES * total); + int32_t idx = 0; + + for (int32_t i = 0; i < numOfResSet; ++i) { + MultipleQueryResult *pOneResult = &pResults[i]; + for (int32_t j = 0; j < pOneResult->len; ++j) { + (*pRes)[idx++] = pOneResult->pData[j]; + } + } + + return total; +} + +static void removeDuplicateKey(tSkipListKey *pKey, int32_t *numOfKey, __compar_fn_t comparator) { + if (*numOfKey == 1) { + return; + } + + qsort(pKey, *numOfKey, sizeof(pKey[0]), comparator); + int32_t i = 0, j = 1; + + while (i < (*numOfKey) && j < (*numOfKey)) { + int32_t ret = comparator(&pKey[i], &pKey[j]); + if (ret == 0) { + j++; + } else { + pKey[i + 1] = pKey[j]; + i++; + j++; + } + } + + (*numOfKey) = i + 1; +} + +int32_t mergeResult(const tSkipListKey *pKey, int32_t numOfKey, tSkipListNode ***pRes, __compar_fn_t comparator, + tSkipListNode *pNode) { + int32_t i = 0, j = 0; + // merge two sorted arrays in O(n) time + while (i < numOfKey && pNode != NULL) { + int32_t ret = comparator(&pNode->key, &pKey[i]); + if (ret < 0) { + (*pRes)[j++] = pNode; + pNode = pNode->pForward[0]; + } else if (ret == 0) { + pNode = pNode->pForward[0]; + } else { // pNode->key > pkey[i] + i++; + } + } + + while (pNode != NULL) { + (*pRes)[j++] = pNode; + pNode = pNode->pForward[0]; + } + return j; +} + +int32_t tSkipListPointQuery(tSkipList *pSkipList, tSkipListKey *pKey, int32_t numOfKey, tSkipListPointQueryType type, + tSkipListNode ***pRes) { + if (numOfKey == 0 || pKey == NULL || pSkipList == NULL || pSkipList->nSize == 0 || + (type != INCLUDE_POINT_QUERY && type != EXCLUDE_POINT_QUERY)) { + (*pRes) = NULL; + return 0; + } + + __compar_fn_t comparator = getKeyComparator(pKey->nType); + removeDuplicateKey(pKey, &numOfKey, comparator); + + if (type == INCLUDE_POINT_QUERY) { + if (numOfKey == 1) { + return tSkipListGets(pSkipList, &pKey[0], pRes); + } else { + MultipleQueryResult *pTempResult = (MultipleQueryResult *)malloc(sizeof(MultipleQueryResult) * numOfKey); + for (int32_t i = 0; i < numOfKey; ++i) { + pTempResult[i].len = tSkipListGets(pSkipList, &pKey[i], &pTempResult[i].pData); + } + int32_t num = mergeQueryResult(pTempResult, numOfKey, pRes); + + for (int32_t i = 0; i < numOfKey; ++i) { + free(pTempResult[i].pData); + } + free(pTempResult); + return num; + } + } else { // exclude query + *pRes = malloc(POINTER_BYTES * pSkipList->nSize); + + __compar_fn_t filterComparator = getKeyFilterComparator(pSkipList, pKey->nType); + + tSkipListNode *pNode = pSkipList->pHead.pForward[0]; + int32_t retLen = mergeResult(pKey, numOfKey, pRes, filterComparator, pNode); + + if (retLen < pSkipList->nSize) { + (*pRes) = realloc(*pRes, POINTER_BYTES * retLen); + } + return retLen; + } +} + +int32_t tSkipListDefaultCompare(tSkipList *pSkipList, tSkipListKey *a, tSkipListKey *b) { + switch (pSkipList->keyType) { + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_BOOL: { + if (a->i64Key == b->i64Key) { + return 0; + } else { + return a->i64Key > b->i64Key ? 1 : -1; + } + }; + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + if (fabs(a->dKey - b->dKey) < FLT_EPSILON) { + return 0; + } else { + return a->dKey > b->dKey ? 1 : -1; + } + }; + case TSDB_DATA_TYPE_BINARY: { + if (a->nLen == b->nLen) { + int32_t ret = strncmp(a->pz, b->pz, a->nLen); + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 1 : -1; + } + } else { + return a->nLen > b->nLen ? 
1 : -1; + } + }; + } + + return 0; +} diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c new file mode 100644 index 000000000000..868e606b7167 --- /dev/null +++ b/src/util/src/tsocket.c @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tglobalcfg.h" +#include "tlog.h" +#include "tsocket.h" +#include "tutil.h" + +unsigned int ip2uint(const char *const ip_addr); +int taosSetNonblocking(int sock, int on); +int taosSetSockOpt(int socketfd, int level, int optname, void *optval, int optlen); + +/* + * Function to get the public ip address of current machine. If get IP + * successfully, return 0, else, return -1. The return values is ip. + * + * Use: + * if (taosGetPublicIp(ip) != 0) { + * perror("Fail to get public IP address\n"); + * exit(EXIT_FAILURE); + * } + */ +int taosGetPublicIp(char *const ip) { + /* bool flag; */ + int flag; + int sock; + char ** pptr = NULL; + struct sockaddr_in destAddr; + struct hostent * ptr = NULL; + char destIP[128]; + char szBuffer[] = { + "GET / HTTP/1.1\nHost: ident.me\nUser-Agent: curl/7.47.0\nAccept: " + "*/*\n\n"}; + char res[1024]; + + // Create socket + sock = (int)socket(AF_INET, SOCK_STREAM, 0); + if (sock == -1) { + return -1; + } + + bzero((void *)&destAddr, sizeof(destAddr)); + destAddr.sin_family = AF_INET; + destAddr.sin_port = htons(80); + + ptr = gethostbyname("ident.me"); + if (ptr == NULL) { + return -1; + } + + // Loop to find a valid IP address + for (flag = 0, pptr = ptr->h_addr_list; NULL != *pptr; ++pptr) { + inet_ntop(ptr->h_addrtype, *pptr, destIP, sizeof(destIP)); + destAddr.sin_addr.s_addr = inet_addr(destIP); + if (connect(sock, (struct sockaddr *)&destAddr, sizeof(struct sockaddr)) != -1) { + flag = 1; + break; + } + } + + // Check if the host is available. + if (flag == 0) { + return -1; + } + + // Check send. + if (strlen(szBuffer) != taosWriteSocket(sock, szBuffer, (size_t)strlen(szBuffer))) { + return -1; + } + + // Receive response. + if (taosReadSocket(sock, res, 1024) == -1) { + return -1; + } + + // Extract the IP address from the response. + int c_start = 0, c_end = 0; + for (; c_start < (int)strlen(res); c_start = c_end + 1) { + for (c_end = c_start; c_end < (int)strlen(res) && res[c_end] != '\n'; c_end++) { + } + + if (c_end >= (int)strlen(res)) { + return -1; + } + + if (res[c_start] >= '0' && res[c_start] <= '9') { + strncpy(ip, res + c_start, (size_t)(c_end - c_start)); + ip[c_end - c_start] = '\0'; + break; + } + } + + return 0; +} + +// Function converting an IP address string to an unsigned int. 
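+// A minimal usage sketch (assuming a little-endian host, since the octets are
+// packed in string order into the returned integer):
+//   unsigned int v = ip2uint("192.168.1.10");
+//   char str[16];
+//   tinet_ntoa(str, v);   /* yields "192.168.1.10" again */
+// The caller must pass a dotted-quad string shorter than 20 characters, as it
+// is copied into the fixed-size ip_addr_cpy buffer below.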
+unsigned int ip2uint(const char *const ip_addr) { + char ip_addr_cpy[20]; + char ip[5]; + + strcpy(ip_addr_cpy, ip_addr); + + char *s_start, *s_end; + s_start = ip_addr_cpy; + s_end = ip_addr_cpy; + + int k; + + for (k = 0; *s_start != '\0'; s_start = s_end) { + for (s_end = s_start; *s_end != '.' && *s_end != '\0'; s_end++) { + } + if (*s_end == '.') { + *s_end = '\0'; + s_end++; + } + ip[k++] = (char)atoi(s_start); + } + + ip[k] = '\0'; + + return *((unsigned int *)ip); +} + +int taosWriteMsg(int fd, void *buf, int nbytes) { + int nleft, nwritten; + char *ptr = (char *)buf; + + nleft = nbytes; + + while (nleft > 0) { + nwritten = (int)taosWriteSocket(fd, (char *)ptr, (size_t)nleft); + if (nwritten <= 0) { + if (errno == EINTR) + continue; + else + return -1; + } else { + nleft -= nwritten; + ptr += nwritten; + } + } + + return (nbytes - nleft); +} + +int taosReadMsg(int fd, void *buf, int nbytes) { + int nleft, nread; + char *ptr = (char *)buf; + + nleft = nbytes; + + if (fd < 0) return -1; + + while (nleft > 0) { + nread = (int)taosReadSocket(fd, ptr, (size_t)nleft); + if (nread == 0) { + break; + } else if (nread < 0) { + if (errno == EINTR) { + continue; + } else { + return -1; + } + } else { + nleft -= nread; + ptr += nread; + } + } + + return (nbytes - nleft); +} + +int taosNonblockwrite(int fd, char *ptr, int nbytes) { + taosSetNonblocking(fd, 1); + + int nleft, nwritten, nready; + fd_set fset; + struct timeval tv; + + nleft = nbytes; + while (nleft > 0) { + tv.tv_sec = 30; + tv.tv_usec = 0; + FD_ZERO(&fset); + FD_SET(fd, &fset); + if ((nready = select(fd + 1, NULL, &fset, NULL, &tv)) == 0) { + errno = ETIMEDOUT; + pError("fd %d timeout, no enough space to write", fd); + break; + + } else if (nready < 0) { + if (errno == EINTR) continue; + + pError("select error, %d (%s)", errno, strerror(errno)); + return -1; + } + + nwritten = (int)send(fd, ptr, (size_t)nleft, MSG_NOSIGNAL); + if (nwritten <= 0) { + if (errno == EAGAIN || errno == EINTR) continue; + + pError("write error, %d (%s)", errno, strerror(errno)); + return -1; + } + + nleft -= nwritten; + ptr += nwritten; + } + + taosSetNonblocking(fd, 0); + + return (nbytes - nleft); +} + +int taosReadn(int fd, char *ptr, int nbytes) { + int nread, nready, nleft = nbytes; + + fd_set fset; + struct timeval tv; + + while (nleft > 0) { + tv.tv_sec = 30; + tv.tv_usec = 0; + FD_ZERO(&fset); + FD_SET(fd, &fset); + if ((nready = select(fd + 1, NULL, &fset, NULL, &tv)) == 0) { + errno = ETIMEDOUT; + pError("fd %d timeout\n", fd); + break; + } else if (nready < 0) { + if (errno == EINTR) continue; + pError("select error, %d (%s)", errno, strerror(errno)); + return -1; + } + + if ((nread = (int)taosReadSocket(fd, ptr, (size_t)nleft)) < 0) { + if (errno == EINTR) continue; + pError("read error, %d (%s)", errno, strerror(errno)); + return -1; + + } else if (nread == 0) { + pError("fd %d EOF", fd); + break; // EOF + } + + nleft -= nread; + ptr += nread; + } + + return (nbytes - nleft); +} + +int taosOpenUdpSocket(char *ip, short port) { + struct sockaddr_in localAddr; + int sockFd; + int ttl = 128; + int reuse, nocheck; + int bufSize = 8192000; + + pTrace("open udp socket:%s:%d", ip, port); + // if (tsAllowLocalhost) ip = "0.0.0.0"; + + memset((char *)&localAddr, 0, sizeof(localAddr)); + localAddr.sin_family = AF_INET; + localAddr.sin_addr.s_addr = inet_addr(ip); + localAddr.sin_port = (uint16_t)htons((uint16_t)port); + + if ((sockFd = (int)socket(AF_INET, SOCK_DGRAM, 0)) < 0) { + pError("failed to open udp socket: %d (%s)", errno, 
strerror(errno)); + return -1; + } + + reuse = 1; + if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, sizeof(reuse)) < 0) { + pError("setsockopt SO_REUSEADDR failed): %d (%s)", errno, strerror(errno)); + close(sockFd); + return -1; + }; + + nocheck = 1; + if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_NO_CHECK, (void *)&nocheck, sizeof(nocheck)) < 0) { + pError("setsockopt SO_NO_CHECK failed: %d (%s)", errno, strerror(errno)); + close(sockFd); + return -1; + } + + ttl = 128; + if (taosSetSockOpt(sockFd, IPPROTO_IP, IP_TTL, (void *)&ttl, sizeof(ttl)) < 0) { + pError("setsockopt IP_TTL failed: %d (%s)", errno, strerror(errno)); + close(sockFd); + return -1; + } + + if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_SNDBUF, (void *)&bufSize, sizeof(bufSize)) != 0) { + pError("failed to set the send buffer size for UDP socket\n"); + close(sockFd); + return -1; + } + + if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_RCVBUF, (void *)&bufSize, sizeof(bufSize)) != 0) { + pError("failed to set the receive buffer size for UDP socket\n"); + close(sockFd); + return -1; + } + + /* bind socket to local address */ + if (bind(sockFd, (struct sockaddr *)&localAddr, sizeof(localAddr)) < 0) { + pError("failed to bind udp socket: %d (%s), %s:%d", errno, strerror(errno), ip, port); + taosCloseSocket(sockFd); + return -1; + } + + return sockFd; +} + +int taosOpenTcpClientSocket(char *destIp, short destPort, char *clientIp) { + int sockFd = 0; + struct sockaddr_in serverAddr, clientAddr; + int ret; + + pTrace("open tcp client socket:%s:%d", destIp, destPort); + // if (tsAllowLocalhost) destIp = "0.0.0.0"; + + sockFd = (int)socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); + + if (sockFd < 0) { + pError("failed to open the socket: %d (%s)", errno, strerror(errno)); + return -1; + } + + if (clientIp && clientIp[0] && clientIp[0] != '0') { + memset((char *)&clientAddr, 0, sizeof(clientAddr)); + clientAddr.sin_family = AF_INET; + clientAddr.sin_addr.s_addr = inet_addr(clientIp); + clientAddr.sin_port = 0; + + /* bind socket to client address */ + if (bind(sockFd, (struct sockaddr *)&clientAddr, sizeof(clientAddr)) < 0) { + pError("bind tcp client socket failed, client(%s:0), dest(%s:%d), reason:%d(%s)", + clientIp, destIp, destPort, errno, strerror(errno)); + close(sockFd); + return -1; + } + } + + memset((char *)&serverAddr, 0, sizeof(serverAddr)); + serverAddr.sin_family = AF_INET; + serverAddr.sin_addr.s_addr = inet_addr(destIp); + serverAddr.sin_port = (uint16_t)htons((uint16_t)destPort); + + ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); + + if (ret != 0) { + pError("failed to connect socket, ip:%s, port:%d, reason: %s", destIp, destPort, strerror(errno)); + taosCloseSocket(sockFd); + sockFd = -1; + } + + return sockFd; +} + +void taosCloseTcpSocket(int sockFd) { + struct linger linger; + linger.l_onoff = 1; + linger.l_linger = 0; + if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_LINGER, (void *)&linger, sizeof(linger)) < 0) { + pError("setsockopt SO_LINGER failed: %d (%s)", errno, strerror(errno)); + } + + taosCloseSocket(sockFd); +} + +int taosKeepTcpAlive(int sockFd) { + int alive = 1; + if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_KEEPALIVE, (void *)&alive, sizeof(alive)) < 0) { + pError("fd:%d setsockopt SO_KEEPALIVE failed: %d (%s)", sockFd, errno, strerror(errno)); + close(sockFd); + return -1; + } + + int probes = 3; + if (taosSetSockOpt(sockFd, SOL_TCP, TCP_KEEPCNT, (void *)&probes, sizeof(probes)) < 0) { + pError("fd:%d setsockopt SO_KEEPCNT failed: %d (%s)", sockFd, errno, 
strerror(errno)); + close(sockFd); + return -1; + } + + int alivetime = 10; + if (taosSetSockOpt(sockFd, SOL_TCP, TCP_KEEPIDLE, (void *)&alivetime, sizeof(alivetime)) < 0) { + pError("fd:%d setsockopt SO_KEEPIDLE failed: %d (%s)", sockFd, errno, strerror(errno)); + close(sockFd); + return -1; + } + + int interval = 3; + if (taosSetSockOpt(sockFd, SOL_TCP, TCP_KEEPINTVL, (void *)&interval, sizeof(interval)) < 0) { + pError("fd:%d setsockopt SO_KEEPINTVL failed: %d (%s)", sockFd, errno, strerror(errno)); + close(sockFd); + return -1; + } + + int nodelay = 1; + if (taosSetSockOpt(sockFd, IPPROTO_TCP, TCP_NODELAY, (void *)&nodelay, sizeof(nodelay)) < 0) { + pError("fd:%d setsockopt TCP_NODELAY failed %d (%s)", sockFd, errno, strerror(errno)); + close(sockFd); + return -1; + } + + return 0; +} + +int taosOpenTcpServerSocket(char *ip, short port) { + struct sockaddr_in serverAdd; + int sockFd; + int reuse; + + pTrace("open tcp server socket:%s:%d", ip, port); + // if (tsAllowLocalhost) ip = "0.0.0.0"; + + bzero((char *)&serverAdd, sizeof(serverAdd)); + serverAdd.sin_family = AF_INET; + serverAdd.sin_addr.s_addr = inet_addr(ip); + serverAdd.sin_port = (uint16_t)htons((uint16_t)port); + + if ((sockFd = (int)socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { + pError("failed to open TCP socket: %d (%s)", errno, strerror(errno)); + return -1; + } + + /* set REUSEADDR option, so the portnumber can be re-used */ + reuse = 1; + if (taosSetSockOpt(sockFd, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, sizeof(reuse)) < 0) { + pError("setsockopt SO_REUSEADDR failed: %d (%s)", errno, strerror(errno)); + close(sockFd); + return -1; + }; + + /* bind socket to server address */ + if (bind(sockFd, (struct sockaddr *)&serverAdd, sizeof(serverAdd)) < 0) { + pError("bind tcp server socket failed, %s:%d, reason:%d(%s)", ip, port, errno, strerror(errno)); + close(sockFd); + return -1; + } + + if (taosKeepTcpAlive(sockFd) < 0) return -1; + + if (listen(sockFd, 10) < 0) { + pError("listen tcp server socket failed, %s:%d, reason:%d(%s)", ip, port, errno, strerror(errno)); + return -1; + } + + return sockFd; +} + +int taosOpenRawSocket(char *ip) { + int fd, hold; + struct sockaddr_in rawAdd; + + pTrace("open udp raw socket:%s", ip); + // if (tsAllowLocalhost) ip = "0.0.0.0"; + + fd = (int)socket(AF_INET, SOCK_RAW, IPPROTO_UDP); + if (fd < 0) { + pError("failed to open raw socket: %d (%s)", errno, strerror(errno)); + return -1; + } + + hold = 1; + if (taosSetSockOpt(fd, IPPROTO_IP, IP_HDRINCL, (void *)&hold, sizeof(hold)) < 0) { + pError("failed to set hold option: %d (%s)", errno, strerror(errno)); + close(fd); + return -1; + } + + bzero((char *)&rawAdd, sizeof(rawAdd)); + rawAdd.sin_family = AF_INET; + rawAdd.sin_addr.s_addr = inet_addr(ip); + + if (bind(fd, (struct sockaddr *)&rawAdd, sizeof(rawAdd)) < 0) { + pError("failed to bind RAW socket: %d (%s)", errno, strerror(errno)); + close(fd); + return -1; + } + + return fd; +} + +void tinet_ntoa(char *ipstr, unsigned int ip) { + sprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip >> 8) & 0xFF, (ip >> 16) & 0xFF, ip >> 24); +} + +#define COPY_SIZE 32768 +// sendfile shall be used + +int taosCopyFds(int sfd, int dfd, int64_t len) { + int64_t leftLen; + int readLen, writeLen; + char temp[COPY_SIZE]; + + leftLen = len; + + while (leftLen > 0) { + if (leftLen < COPY_SIZE) + readLen = (int)leftLen; + else + readLen = COPY_SIZE; // 4K + + int retLen = taosReadMsg(sfd, temp, (int)readLen); + if (readLen != retLen) { + pError("read error, readLen:%d retLen:%d len:%ld leftLen:%ld, 
reason:%s", readLen, retLen, len, leftLen, + strerror(errno)); + return -1; + } + + writeLen = taosWriteMsg(dfd, temp, readLen); + + if (readLen != writeLen) { + pError("copy error, readLen:%d writeLen:%d len:%ld leftLen:%ld, reason:%s", readLen, writeLen, len, leftLen, + strerror(errno)); + return -1; + } + + leftLen -= readLen; + } + + return 0; +} diff --git a/src/util/src/tstatus.c b/src/util/src/tstatus.c new file mode 100644 index 000000000000..8539944dfb51 --- /dev/null +++ b/src/util/src/tstatus.c @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +char* sdbDnodeStatusStr[] = {"offline", "creating", "unsynced", "slave", "master", "ready"}; + +char* sdbDnodeBalanceStateStr[] = {"balanced", "balancing", "offline removing", "shell removing"}; diff --git a/src/util/src/tstoken.c b/src/util/src/tstoken.c new file mode 100644 index 000000000000..42f12d499f0d --- /dev/null +++ b/src/util/src/tstoken.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "shash.h" +#include "tstoken.h" + +static char operator[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '$', '%', '&', 0, '(', ')', '*', '+', + 0, '-', 0, '/', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '<', '=', '>', 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, '[', 0, ']', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '|', 0, 0, 0}; + +static char delimiter[] = { + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ',', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ';', 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +bool isCharInDelimiter(char c, char *delimiter) { + for (int i = 0; i < strlen(delimiter); i++) { + if (delimiter[i] == c) return true; + } + return false; +} + +char *tscGetTokenDelimiter(char *string, char **token, int *tokenLen, char *delimiters) { + while (*string != 0) { + if (isCharInDelimiter(*string, delimiters)) { + ++string; + } else { + break; + } + } + + *token = string; + + char *str = string; + *tokenLen = 0; + while (*str != 0) { + if (!isCharInDelimiter(*str, delimiters)) { + *tokenLen = *tokenLen + 1; + str++; + } else { + break; + } + } + + return string; +} + +char *tscGetToken(char *string, char **token, int *tokenLen) { + char quote = 0; + + while (*string != 0) { + if (delimiter[*string]) { + ++string; + } else { + break; + } + } + + char quotaChar = 0; + if (*string == '\'' || *string == '\"') { + quote = 1; + quotaChar = *string; + string++; + } + + *token = string; + /* not in string, return token */ + if (*string > 0 && operator[*string] && quote == 0) { + string++; + /* handle the case: insert into tabx using stable1 tags(-1)/tags(+1) + * values(....) */ + if (operator[*string] &&(*string != '(' && *string != ')' && *string != '-' && *string != '+')) + *tokenLen = 2; + else + *tokenLen = 1; + return *token + *tokenLen; + } + + while (*string != 0) { + if (quote) { + // handle escape situation: '\"', the " should not be eliminated + if (*string == quotaChar) { + if (*(string - 1) != '\\') { + break; + } else { + shiftStr(string - 1, string); + } + } else { + ++string; + } + } else { + if (delimiter[*string]) break; + + if (operator[*string]) break; + + ++string; + } + } + + *tokenLen = (int)(string - *token); + + if (quotaChar != 0 && *string != 0 && *(string + 1) != 0) { + return string + 1; + } else { + return string; + } +} + +void shiftStr(char *dst, char *src) { + int32_t i = 0; + do { + dst[i] = src[i]; + i++; + } while (delimiter[src[i]] == 0); + + src[i - 1] = ' '; +} diff --git a/src/util/src/tsystem.c b/src/util/src/tsystem.c new file mode 100644 index 000000000000..5b4754db992b --- /dev/null +++ b/src/util/src/tsystem.c @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tglobalcfg.h" +#include "tlog.h" +#include "tsystem.h" +#include "tutil.h" + +#define PROCESS_ITEM 12 + +typedef struct { + uint64_t user; + uint64_t nice; + uint64_t system; + uint64_t idle; +} SysCpuInfo; + +typedef struct { + uint64_t utime; // user time + uint64_t stime; // kernel time + uint64_t cutime; // all user time + uint64_t cstime; // all dead time +} ProcCpuInfo; + +static pid_t tsProcId; +static char tsSysNetFile[] = "/proc/net/dev"; +static char tsSysCpuFile[] = "/proc/stat"; +static char tsProcCpuFile[25] = {0}; +static char tsProcMemFile[25] = {0}; +static char tsProcIOFile[25] = {0}; +static float tsPageSizeKB = 0; + +bool taosGetSysMemory(float *memoryUsedMB) { + float memoryAvailMB = (float)sysconf(_SC_AVPHYS_PAGES) * tsPageSizeKB / 1024; + *memoryUsedMB = (float)tsTotalMemoryMB - memoryAvailMB; + return true; +} + +bool taosGetProcMemory(float *memoryUsedMB) { + FILE *fp = fopen(tsProcMemFile, "r"); + if (fp == NULL) { + pError("open file:%s failed", tsProcMemFile); + return false; + } + + size_t len; + char * line = NULL; + while (!feof(fp)) { + tfree(line); + getline(&line, &len, fp); + if (line == NULL) { + break; + } + if (strstr(line, "VmRSS:") != NULL) { + break; + } + } + + if (line == NULL) { + pError("read file:%s failed", tsProcMemFile); + fclose(fp); + return false; + } + + int64_t memKB = 0; + char tmp[10]; + sscanf(line, "%s %ld", tmp, &memKB); + *memoryUsedMB = (float)((double)memKB / 1024); + + tfree(line); + fclose(fp); + return true; +} + +bool taosGetSysCpuInfo(SysCpuInfo *cpuInfo) { + FILE *fp = fopen(tsSysCpuFile, "r"); + if (fp == NULL) { + pError("open file:%s failed", tsSysCpuFile); + return false; + } + + size_t len; + char * line = NULL; + getline(&line, &len, fp); + if (line == NULL) { + pError("read file:%s failed", tsSysCpuFile); + fclose(fp); + return false; + } + + char cpu[10] = {0}; + sscanf(line, "%s %ld %ld %ld %ld", cpu, &cpuInfo->user, &cpuInfo->nice, &cpuInfo->system, &cpuInfo->idle); + + tfree(line); + fclose(fp); + return true; +} + +bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) { + FILE *fp = fopen(tsProcCpuFile, "r"); + if (fp == NULL) { + pError("open file:%s failed", tsProcCpuFile); + return false; + } + + size_t len; + char * line = NULL; + getline(&line, &len, fp); + if (line == NULL) { + pError("read file:%s failed", tsProcCpuFile); + fclose(fp); + return false; + } + + for (int i = 0, blank = 0; line[i] != 0; ++i) { + if (line[i] == ' ') blank++; + if (blank == PROCESS_ITEM) { + sscanf(line + i + 1, "%ld %ld %ld %ld", &cpuInfo->utime, &cpuInfo->stime, &cpuInfo->cutime, &cpuInfo->cstime); + break; + } + } + + tfree(line); + fclose(fp); + return true; +} + +void taosGetSystemTimezone() { + SGlobalConfig *cfg_timezone = tsGetConfigOption("timezone"); + if (cfg_timezone == NULL) return; + if (cfg_timezone->cfgStatus >= TSDB_CFG_CSTATUS_DEFAULT) { + return; + } + + /* + * NOTE: do not remove it. 
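+ * (localtime_r() below fills tm1.tm_isdst, i.e. whether DST is currently in
+ *  effect for this zone)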
+ * Enforce set the correct daylight saving time(DST) flag according + * to current time + */ + time_t tx1 = time(NULL); + struct tm tm1; + localtime_r(&tx1, &tm1); + + /* load time zone string from /etc/timezone */ + FILE *f = fopen("/etc/timezone", "r"); + char buf[64] = {0}; + if (f != NULL) { + fread(buf, 64, 1, f); + fclose(f); + } + + char *lineEnd = strstr(buf, "\n"); + if (lineEnd != NULL) { + *lineEnd = 0; + } + + // for CentOS system, /etc/timezone does not exist. Ignore the TZ environment + // variables + if (strlen(buf) > 0) { + setenv("TZ", buf, 1); + } + + // get and set default timezone + tzset(); + + /* + * get CURRENT time zone. + * system current time zone is affected by daylight saving time(DST) + * + * e.g., the local time zone of London in DST is GMT+01:00, + * otherwise is GMT+00:00 + */ + int32_t tz = (-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR; + tz += daylight; + + /* + * format example: + * + * Asia/Shanghai (CST, +0800) + * Europe/London (BST, +0100) + */ + sprintf(tsTimezone, "%s (%s, %s%02d00)", buf, tzname[daylight], tz >= 0 ? "+" : "-", abs(tz)); + + cfg_timezone->cfgStatus = TSDB_CFG_CSTATUS_DEFAULT; + pPrint("timezone not configured, set to system default:%s", tsTimezone); +} + +typedef struct CharsetPair { + char *oldCharset; + char *newCharset; +} CharsetPair; + +char *taosCharsetReplace(char *charsetstr) { + CharsetPair charsetRep[] = { + {"utf8", "UTF-8"}, {"936", "CP936"}, + }; + + for (int32_t i = 0; i < tListLen(charsetRep); ++i) { + if (strcasecmp(charsetRep[i].oldCharset, charsetstr) == 0) { + return strdup(charsetRep[i].newCharset); + } + } + + return strdup(charsetstr); +} + +void taosGetSystemLocale() { // get and set default locale + /* + * POSIX format locale string: + * (Language Strings)_(Country/Region Strings).(code_page) + * + * example: en_US.UTF-8, zh_CN.GB18030, zh_CN.UTF-8, + * + * if user does not specify the locale in taos.cfg + * the program use default LC_CTYPE as system locale. + * + * In case of some CentOS systems, their default locale is "en_US.utf8", which + * is not + * valid code_page for libiconv that is employed to convert string in this + * system. + * User needs to specify the locale explicitly + * in config file in the correct format: en_US.UTF-8 + * + * In case of windows client, the locale string is not legal POSIX format, + * user needs to + * set the correct code_page for libiconv. Usually, the code_page of windows + * system + * with simple chinese is CP936, CP437 for English locale. 
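+ * As an illustration (assumed example values, not defaults), taos.cfg could
+ * contain:
+ *   locale  en_US.UTF-8
+ *   charset CP936
+ * which correspond to the "locale" and "charset" options queried below.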
+ * + */ + char sep = '.'; + char *locale = NULL; + + SGlobalConfig *cfg_locale = tsGetConfigOption("locale"); + if (cfg_locale && cfg_locale->cfgStatus < TSDB_CFG_CSTATUS_DEFAULT) { + locale = setlocale(LC_CTYPE, ""); + if (locale == NULL) { + pError("can't get locale from system"); + } else { + strncpy(tsLocale, locale, sizeof(tsLocale) / sizeof(tsLocale[0])); + pPrint("locale not configured, set to system default:%s", tsLocale); + } + } + + /* if user does not specify the charset, extract it from locale */ + SGlobalConfig *cfg_charset = tsGetConfigOption("charset"); + if (cfg_charset && cfg_charset->cfgStatus < TSDB_CFG_CSTATUS_DEFAULT) { + char *str = strrchr(tsLocale, sep); + if (str != NULL) { + str++; + + char *revisedCharset = taosCharsetReplace(str); + strncpy(tsCharset, revisedCharset, sizeof(tsCharset) / sizeof(tsCharset[0])); + + free(revisedCharset); + pPrint("charset not configured, set to system default:%s", tsCharset); + } + } +} + +bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { + static uint64_t lastSysUsed = 0; + static uint64_t lastSysTotal = 0; + static uint64_t lastProcTotal = 0; + + SysCpuInfo sysCpu; + ProcCpuInfo procCpu; + if (!taosGetSysCpuInfo(&sysCpu)) { + return false; + } + if (!taosGetProcCpuInfo(&procCpu)) { + return false; + } + + uint64_t curSysUsed = sysCpu.user + sysCpu.nice + sysCpu.system; + uint64_t curSysTotal = curSysUsed + sysCpu.idle; + uint64_t curProcTotal = procCpu.utime + procCpu.stime + procCpu.cutime + procCpu.cstime; + + if (lastSysUsed == 0 || lastSysTotal == 0 || lastProcTotal == 0) { + lastSysUsed = curSysUsed > 1 ? curSysUsed : 1; + lastSysTotal = curSysTotal > 1 ? curSysTotal : 1; + lastProcTotal = curProcTotal > 1 ? curProcTotal : 1; + return false; + } + + if (curSysTotal == lastSysTotal) { + return false; + } + + *sysCpuUsage = (float)((double)(curSysUsed - lastSysUsed) / (double)(curSysTotal - lastSysTotal) * 100); + *procCpuUsage = (float)((double)(curProcTotal - lastProcTotal) / (double)(curSysTotal - lastSysTotal) * 100); + + lastSysUsed = curSysUsed; + lastSysTotal = curSysTotal; + lastProcTotal = curProcTotal; + + return true; +} + +bool taosGetDisk(float *diskUsedGB) { + struct statvfs info; + const double unit = 1024 * 1024 * 1024; + + if (statvfs(tsDirectory, &info)) { + *diskUsedGB = 0; + tsTotalDiskGB = 0; + return false; + } + + float diskAvail = (float)((double)info.f_bavail * (double)info.f_frsize / unit); + tsTotalDiskGB = (int32_t)((double)info.f_blocks * (double)info.f_frsize / unit); + *diskUsedGB = (float)tsTotalDiskGB - diskAvail; + + return true; +} + +bool taosGetCardName(char *ip, char *name) { + struct ifaddrs *ifaddr, *ifa; + int family, s; + char host[NI_MAXHOST]; + bool ret = false; + + if (getifaddrs(&ifaddr) == -1) { + return false; + } + + /* Walk through linked list, maintaining head pointer so we can free list + * later */ + for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) { + if (ifa->ifa_addr == NULL) continue; + + family = ifa->ifa_addr->sa_family; + if (family != AF_INET) { + continue; + } + + s = getnameinfo(ifa->ifa_addr, (family == AF_INET) ? 
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6), host, + NI_MAXHOST, NULL, 0, NI_NUMERICHOST); + if (s != 0) { + break; + } + + if (strcmp(host, ip) == 0) { + strcpy(name, ifa->ifa_name); + ret = true; + } + } + + freeifaddrs(ifaddr); + return ret; +} + +bool taosGetCardInfo(int64_t *bytes) { + static char tsPublicCard[1000] = {0}; + if (tsPublicCard[0] == 0) { + if (!taosGetCardName(tsInternalIp, tsPublicCard)) { + pError("can't get card name from ip:%s", tsInternalIp); + return false; + } + int cardNameLen = (int)strlen(tsPublicCard); + for (int i = 0; i < cardNameLen; ++i) { + if (tsPublicCard[i] == ':') { + tsPublicCard[i] = 0; + break; + } + } + // pTrace("card name of public ip:%s is %s", tsPublicIp, tsPublicCard); + } + + FILE *fp = fopen(tsSysNetFile, "r"); + if (fp == NULL) { + pError("open file:%s failed", tsSysNetFile); + return false; + } + + int64_t rbytes, rpackts, tbytes, tpackets; + int64_t nouse1, nouse2, nouse3, nouse4, nouse5, nouse6; + char nouse0[200] = {0}; + + size_t len; + char * line = NULL; + + while (!feof(fp)) { + tfree(line); + getline(&line, &len, fp); + if (line == NULL) { + break; + } + if (strstr(line, tsPublicCard) != NULL) { + break; + } + } + if (line != NULL) { + sscanf(line, "%s %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld", nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, + &nouse4, &nouse5, &nouse6, &tbytes, &tpackets); + *bytes = rbytes + tbytes; + tfree(line); + fclose(fp); + return true; + } else { + pWarn("can't get card:%s info from device:%s", tsPublicCard, tsSysNetFile); + *bytes = 0; + fclose(fp); + return false; + } +} + +bool taosGetBandSpeed(float *bandSpeedKb) { + static int64_t lastBytes = 0; + static time_t lastTime = 0; + int64_t curBytes = 0; + time_t curTime = time(NULL); + + if (!taosGetCardInfo(&curBytes)) { + return false; + } + + if (lastTime == 0 || lastBytes == 0) { + lastTime = curTime; + lastBytes = curBytes; + return false; + } + + if (lastTime >= curTime || lastBytes > curBytes) { + lastTime = curTime; + lastBytes = curBytes; + return false; + } + + double totalBytes = (double)(curBytes - lastBytes) / 1024 * 8; // Kb + *bandSpeedKb = (float)(totalBytes / (double)(curTime - lastTime)); + + // pPrint("bandwidth lastBytes:%ld, lastTime:%ld, curBytes:%ld, curTime:%ld, + // speed:%f", lastBytes, lastTime, curBytes, curTime, *bandSpeed); + + lastTime = curTime; + lastBytes = curBytes; + + return true; +} + +bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { + FILE *fp = fopen(tsProcIOFile, "r"); + if (fp == NULL) { + pError("open file:%s failed", tsProcIOFile); + return false; + } + + size_t len; + char * line = NULL; + char tmp[10]; + int readIndex = 0; + + while (!feof(fp)) { + tfree(line); + getline(&line, &len, fp); + if (line == NULL) { + break; + } + if (strstr(line, "rchar:") != NULL) { + sscanf(line, "%s %ld", tmp, readbyte); + readIndex++; + } else if (strstr(line, "wchar:") != NULL) { + sscanf(line, "%s %ld", tmp, writebyte); + readIndex++; + } else { + } + + if (readIndex >= 2) break; + } + + tfree(line); + fclose(fp); + + if (readIndex < 2) { + pError("read file:%s failed", tsProcIOFile); + return false; + } + + return true; +} + +bool taosGetProcIO(float *readKB, float *writeKB) { + static int64_t lastReadbyte = -1; + static int64_t lastWritebyte = -1; + + int64_t curReadbyte, curWritebyte; + + if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { + return false; + } + + if (lastReadbyte == -1 || lastWritebyte == -1) { + lastReadbyte = curReadbyte; + lastWritebyte = curWritebyte; + return false; + } 
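+  // From here on the values are deltas against the previous sample, converted
+  // from bytes to KB; they are totals for the interval between two calls, not
+  // per-second rates.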
+ + *readKB = (float)((double)(curReadbyte - lastReadbyte) / 1024); + *writeKB = (float)((double)(curWritebyte - lastWritebyte) / 1024); + if (*readKB < 0) *readKB = 0; + if (*writeKB < 0) *writeKB = 0; + + lastReadbyte = curReadbyte; + lastWritebyte = curWritebyte; + + return true; +} + +void taosGetSystemInfo() { + tsNumOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN); + tsPageSize = sysconf(_SC_PAGESIZE); + tsOpenMax = sysconf(_SC_OPEN_MAX); + tsStreamMax = sysconf(_SC_STREAM_MAX); + + tsProcId = (pid_t)syscall(SYS_gettid); + tsPageSizeKB = (float)(sysconf(_SC_PAGESIZE)) / 1024; + tsTotalMemoryMB = (int32_t)((float)sysconf(_SC_PHYS_PAGES) * tsPageSizeKB / 1024); + + snprintf(tsProcMemFile, 25, "/proc/%d/status", tsProcId); + snprintf(tsProcCpuFile, 25, "/proc/%d/stat", tsProcId); + snprintf(tsProcIOFile, 25, "/proc/%d/io", tsProcId); + + float tmp1, tmp2; + taosGetSysMemory(&tmp1); + taosGetProcMemory(&tmp2); + taosGetDisk(&tmp1); + taosGetBandSpeed(&tmp1); + taosGetCpuUsage(&tmp1, &tmp2); + taosGetProcIO(&tmp1, &tmp2); + + taosGetSystemTimezone(); + taosGetSystemLocale(); +} + +void tsPrintOsInfo() { + pPrint(" os pageSize: %ld(KB)", tsPageSize); + pPrint(" os openMax: %ld", tsOpenMax); + pPrint(" os streamMax: %ld", tsStreamMax); + pPrint(" os numOfCores: %d", tsNumOfCores); + pPrint(" os totalDisk: %d(GB)", tsTotalDiskGB); + pPrint(" os totalMemory: %d(MB)", tsTotalMemoryMB); + + struct utsname buf; + if (uname(&buf)) { + pPrint(" can't fetch os info"); + return; + } + pPrint(" os sysname: %s", buf.sysname); + pPrint(" os nodename: %s", buf.nodename); + pPrint(" os release: %s", buf.release); + pPrint(" os version: %s", buf.version); + pPrint(" os machine: %s", buf.machine); +} + +void taosKillSystem() { + // SIGINT + pPrint("taosd will shut down soon"); + kill(tsProcId, 2); +} \ No newline at end of file diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c new file mode 100644 index 000000000000..a60c60bba07e --- /dev/null +++ b/src/util/src/ttime.c @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _XOPEN_SOURCE +#define _BSD_SOURCE + +#include +#include +#include + +#include "tsdb.h" +#include "ttime.h" +#include "tutil.h" + +static int64_t parseFraction(char* str, char** end, int32_t timePrec); +static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec); +static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec); + +int32_t taosGetTimestampSec() { return (int32_t)time(NULL); } + +int64_t taosGetTimestampMs() { + struct timeval systemTime; + gettimeofday(&systemTime, NULL); + return (int64_t)systemTime.tv_sec * 1000L + (uint64_t)systemTime.tv_usec / 1000; +} + +int64_t taosGetTimestampUs() { + struct timeval systemTime; + gettimeofday(&systemTime, NULL); + return (int64_t)systemTime.tv_sec * 1000000L + (uint64_t)systemTime.tv_usec; +} + +/* + * If tsTimePrecision == 1, taosGetTimestamp will return timestamp in microsecond. + * Otherwise, it will return timestamp in millisecond. 
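+ * A typical call (using the precision constants referenced elsewhere in this
+ * file) would be:
+ *   int64_t nowMs = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI);
+ *   int64_t nowUs = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO);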
+ */ +int64_t taosGetTimestamp(int32_t precision) { + if (precision == TSDB_TIME_PRECISION_MICRO) { + return taosGetTimestampUs(); + } else { + return taosGetTimestampMs(); + } +} + +int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec) { + /* parse datatime string in with tz */ + if (strnchr(timestr, 'T', len) != NULL) { + return parseTimeWithTz(timestr, time, timePrec); + } else { + return parseLocaltime(timestr, time, timePrec); + } +} + +char* forwardToTimeStringEnd(char* str) { + int32_t i = 0; + int32_t numOfSep = 0; + + while (str[i] != 0 && numOfSep < 2) { + if (str[i++] == ':') { + numOfSep++; + } + } + + while (str[i] >= '0' && str[i] <= '9') { + i++; + } + + return &str[i]; +} + +int64_t parseFraction(char* str, char** end, int32_t timePrec) { + int32_t i = 0; + int64_t fraction = 0; + + const int32_t MILLI_SEC_FRACTION_LEN = 3; + const int32_t MICRO_SEC_FRACTION_LEN = 6; + + int32_t factor[6] = {1, 10, 100, 1000, 10000, 100000}; + int32_t times = 1; + + while (str[i] >= '0' && str[i] <= '9') { + i++; + } + + int32_t totalLen = i; + if (totalLen <= 0) { + return -1; + } + + /* parse the fraction */ + if (timePrec == TSDB_TIME_PRECISION_MILLI) { + /* only use the initial 3 bits */ + if (i >= MILLI_SEC_FRACTION_LEN) { + i = MILLI_SEC_FRACTION_LEN; + } + + times = MILLI_SEC_FRACTION_LEN - i; + } else { + assert(timePrec == TSDB_TIME_PRECISION_MICRO); + if (i >= MICRO_SEC_FRACTION_LEN) { + i = MICRO_SEC_FRACTION_LEN; + } + times = MICRO_SEC_FRACTION_LEN - i; + } + + fraction = strnatoi(str, i) * factor[times]; + *end = str + totalLen; + + return fraction; +} + +int32_t parseTimezone(char* str, int64_t* tzOffset) { + int64_t hour = 0; + + int32_t i = 0; + if (str[i] != '+' && str[i] != '-') { + return -1; + } + + i++; + + char* sep = strchr(&str[i], ':'); + if (sep != NULL) { + int32_t len = sep - &str[i]; + + hour = strnatoi(&str[i], len); + i += len + 1; + } else { + hour = strnatoi(&str[i], 2); + i += 2; + } + + if (hour > 12) { + return -1; + } + + int64_t sec = strnatoi(&str[i], 2); + if (sec > 70) { + return -1; + } + + sec += (hour * 3600); + + if (str[0] == '+') { + *tzOffset = -sec; + } else { + *tzOffset = sec; + } + + return 0; +} + +/* + * rfc3339 format: + * 2013-04-12T15:52:01+08:00 + * 2013-04-12T15:52:01.123+08:00 + * + * 2013-04-12T15:52:01Z + * 2013-04-12T15:52:01.123Z + * + * iso-8601 format: + * 2013-04-12T15:52:01+0800 + * 2013-04-12T15:52:01.123+0800 + */ +int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec) { + int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 
1000 : 1000000; + int64_t tzOffset = 0; + + struct tm tm = {0}; + char* str = strptime(timestr, "%Y-%m-%dT%H:%M:%S", &tm); + if (str == NULL) { + return -1; + } + + /* mktime will be affected by TZ, set by using taos_options */ + int64_t seconds = timegm(&tm); + + int64_t fraction = 0; + str = forwardToTimeStringEnd(timestr); + + if (str[0] == 'Z' || str[0] == 'z') { + /* utc time, no millisecond, return directly*/ + *time = seconds * factor; + } else if (str[0] == '.') { + str += 1; + if ((fraction = parseFraction(str, &str, timePrec)) < 0) { + return -1; + } + + *time = seconds * factor + fraction; + + char seg = str[0]; + if (seg != 'Z' && seg != 'z' && seg != '+' && seg != '-') { + return -1; + } else if (seg == '+' || seg == '-') { + // parse the timezone + if (parseTimezone(str, &tzOffset) == -1) { + return -1; + } + + *time += tzOffset * factor; + } + + } else if (str[0] == '+' || str[0] == '-') { + *time = seconds * factor + fraction; + + // parse the timezone + if (parseTimezone(str, &tzOffset) == -1) { + return -1; + } + + *time += tzOffset * factor; + } else { + return -1; + } + + return 0; +} + +int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) { + *time = 0; + struct tm tm = {0}; + + char* str = strptime(timestr, "%Y-%m-%d %H:%M:%S", &tm); + if (str == NULL) { + return -1; + } + + /* mktime will be affected by TZ, set by using taos_options */ + int64_t seconds = mktime(&tm); + int64_t fraction = 0; + + if (*str == '.') { + /* parse the second fraction part */ + if ((fraction = parseFraction(str + 1, &str, timePrec)) < 0) { + return -1; + } + } + + int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000; + *time = factor * seconds + fraction; + + return 0; +} + +static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) { + *result = val; + + switch (unit) { + case 's': + (*result) *= MILLISECOND_PER_SECOND; + break; + case 'm': + (*result) *= MILLISECOND_PER_MINUTE; + break; + case 'h': + (*result) *= MILLISECOND_PER_HOUR; + break; + case 'd': + (*result) *= MILLISECOND_PER_DAY; + break; + case 'w': + (*result) *= MILLISECOND_PER_WEEK; + break; + case 'n': + (*result) *= MILLISECOND_PER_MONTH; + break; + case 'y': + (*result) *= MILLISECOND_PER_YEAR; + break; + case 'a': + break; + default: { + ; + return -1; + } + } + + /* get the value in microsecond */ + (*result) *= 1000L; + return 0; +} + +/* + * a - Millionseconds + * s - Seconds + * m - Minutes + * h - Hours + * d - Days (24 hours) + * w - Weeks (7 days) + * n - Months (30 days) + * y - Years (365 days) + */ +int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) { + errno = 0; + char* endPtr = NULL; + + /* get the basic numeric value */ + int64_t timestamp = strtoll(token, &endPtr, 10); + if (errno != 0) { + return -1; + } + + return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts); +} diff --git a/src/util/src/ttimer.c b/src/util/src/ttimer.c new file mode 100644 index 000000000000..ff3277b86c20 --- /dev/null +++ b/src/util/src/ttimer.c @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tidpool.h" +#include "tlog.h" +#include "tsched.h" +#include "ttimer.h" +#include "tutil.h" + +// special mempool without mutex +#define mpool_h void * + +typedef struct { + int numOfFree; /* number of free slots */ + int first; /* the first free slot */ + int numOfBlock; /* the number of blocks */ + int blockSize; /* block size in bytes */ + int * freeList; /* the index list */ + char *pool; /* the actual mem block */ +} pool_t; + +mpool_h tmrMemPoolInit(int maxNum, int blockSize); +char *tmrMemPoolMalloc(mpool_h handle); +void tmrMemPoolFree(mpool_h handle, char *p); +void tmrMemPoolCleanUp(mpool_h handle); + +#define tmrError(...) \ + if (tmrDebugFlag & DEBUG_ERROR) { \ + tprintf("ERROR TMR ", tmrDebugFlag, __VA_ARGS__); \ + } +#define tmrWarn(...) \ + if (tmrDebugFlag & DEBUG_WARN) { \ + tprintf("WARN TMR ", tmrDebugFlag, __VA_ARGS__); \ + } +#define tmrTrace(...) \ + if (tmrDebugFlag & DEBUG_TRACE) { \ + tprintf("TMR ", tmrDebugFlag, __VA_ARGS__); \ + } + +#define maxNumOfTmrCtrl 512 +#define MSECONDS_PER_TICK 5 + +typedef struct _tmr_obj { + void *param1; + void (*fp)(void *, void *); + tmr_h timerId; + short cycle; + struct _tmr_obj * prev; + struct _tmr_obj * next; + int index; + struct _tmr_ctrl_t *pCtrl; +} tmr_obj_t; + +typedef struct { + tmr_obj_t *head; + int count; +} tmr_list_t; + +typedef struct _tmr_ctrl_t { + void * signature; + pthread_mutex_t mutex; /* mutex to protect critical resource */ + int resolution; /* resolution in mseconds */ + int numOfPeriods; /* total number of periods */ + int64_t periodsFromStart; /* count number of periods since start */ + pthread_t thread; /* timer thread ID */ + tmr_list_t * tmrList; + mpool_h poolHandle; + char label[12]; + int maxNumOfTmrs; + int numOfTmrs; + int ticks; + int maxTicks; + int tmrCtrlId; +} tmr_ctrl_t; + +int tmrDebugFlag = DEBUG_ERROR | DEBUG_WARN | DEBUG_FILE; +void taosTmrProcessList(tmr_ctrl_t *); + +tmr_ctrl_t tmrCtrl[maxNumOfTmrCtrl]; +int numOfTmrCtrl = 0; +void * tmrIdPool = NULL; +void * tmrQhandle; +int taosTmrThreads = 1; + +void *taosTimerLoopFunc(int signo) { + tmr_ctrl_t *pCtrl; + int count = 0; + + for (int i = 1; i < maxNumOfTmrCtrl; ++i) { + pCtrl = tmrCtrl + i; + if (pCtrl->signature) { + count++; + pCtrl->ticks++; + if (pCtrl->ticks >= pCtrl->maxTicks) { + taosTmrProcessList(pCtrl); + pCtrl->ticks = 0; + } + if (count >= numOfTmrCtrl) break; + } + } + + return NULL; +} + +void *taosProcessAlarmSignal(void *tharg) { + // Block the signal + sigset_t sigset; + sigemptyset(&sigset); + sigaddset(&sigset, SIGALRM); + sigprocmask(SIG_BLOCK, &sigset, NULL); + + timer_t timerId; + struct sigevent sevent; + sevent.sigev_notify = SIGEV_THREAD_ID; + sevent._sigev_un._tid = syscall(__NR_gettid); + sevent.sigev_signo = SIGALRM; + + if (timer_create(CLOCK_REALTIME, &sevent, &timerId) == -1) { + tmrError("Failed to create timer"); + } + + struct itimerspec ts; + ts.it_value.tv_sec = 0; + ts.it_value.tv_nsec = 1000000 * MSECONDS_PER_TICK; + ts.it_interval.tv_sec = 0; + ts.it_interval.tv_nsec = 1000000 * MSECONDS_PER_TICK; + + if (timer_settime(timerId, 0, &ts, NULL)) { + tmrError("Failed to init timer"); + return NULL; + } + + int signo; + while (1) { + if (sigwait(&sigset, &signo)) { + tmrError("Failed to wait signal: number %d", signo); + continue; + } + /* 
printf("Signal handling: number %d ......\n", signo); */ + + taosTimerLoopFunc(0); + } + + assert(0); + return NULL; +} + +void taosTmrModuleInit(void) { + tmrIdPool = taosInitIdPool(maxNumOfTmrCtrl); + memset(tmrCtrl, 0, sizeof(tmrCtrl)); + + pthread_t thread; + pthread_attr_t tattr; + pthread_attr_init(&tattr); + pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED); + if (pthread_create(&thread, &tattr, taosProcessAlarmSignal, NULL) != 0) { + tmrError("failed to create timer thread"); + return; + } + + pthread_attr_destroy(&tattr); + + tmrQhandle = taosInitScheduler(10000, taosTmrThreads, "tmr"); + tmrTrace("timer module is initialized, thread:%d", taosTmrThreads); +} + +void *taosTmrInit(int maxNumOfTmrs, int resolution, int longest, char *label) { + static pthread_once_t tmrInit = PTHREAD_ONCE_INIT; + tmr_ctrl_t * pCtrl; + + pthread_once(&tmrInit, taosTmrModuleInit); + + int tmrCtrlId = taosAllocateId(tmrIdPool); + + if (tmrCtrlId < 0) { + tmrError("%s bug!!! too many timers!!!", label); + return NULL; + } + + pCtrl = tmrCtrl + tmrCtrlId; + tfree(pCtrl->tmrList); + tmrMemPoolCleanUp(pCtrl->poolHandle); + + memset(pCtrl, 0, sizeof(tmr_ctrl_t)); + + pCtrl->tmrCtrlId = tmrCtrlId; + strcpy(pCtrl->label, label); + pCtrl->maxNumOfTmrs = maxNumOfTmrs; + + if ((pCtrl->poolHandle = tmrMemPoolInit(maxNumOfTmrs + 10, sizeof(tmr_obj_t))) == NULL) { + tmrError("%s failed to allocate mem pool", label); + tmrMemPoolCleanUp(pCtrl->poolHandle); + return NULL; + } + + if (resolution < MSECONDS_PER_TICK) resolution = MSECONDS_PER_TICK; + pCtrl->resolution = resolution; + pCtrl->maxTicks = resolution / MSECONDS_PER_TICK; + pCtrl->ticks = rand() / pCtrl->maxTicks; + pCtrl->numOfPeriods = longest / resolution; + if (pCtrl->numOfPeriods < 10) pCtrl->numOfPeriods = 10; + + pCtrl->tmrList = (tmr_list_t *)malloc(sizeof(tmr_list_t) * pCtrl->numOfPeriods); + for (int i = 0; i < pCtrl->numOfPeriods; i++) { + pCtrl->tmrList[i].head = NULL; + pCtrl->tmrList[i].count = 0; + } + + if (pthread_mutex_init(&pCtrl->mutex, NULL) < 0) { + tmrError("%s failed to create the mutex, reason:%s", label, strerror(errno)); + taosTmrCleanUp(pCtrl); + return NULL; + } + + pCtrl->signature = pCtrl; + numOfTmrCtrl++; + tmrTrace("%s timer ctrl is initialized, index:%d", label, tmrCtrlId); + return pCtrl; +} + +void taosTmrProcessList(tmr_ctrl_t *pCtrl) { + unsigned int index; + tmr_list_t * pList; + tmr_obj_t * pObj, *header; + + pthread_mutex_lock(&pCtrl->mutex); + index = pCtrl->periodsFromStart % pCtrl->numOfPeriods; + pList = &pCtrl->tmrList[index]; + + while (1) { + header = pList->head; + if (header == NULL) break; + + if (header->cycle > 0) { + pObj = header; + while (pObj) { + pObj->cycle--; + pObj = pObj->next; + } + break; + } + + pCtrl->numOfTmrs--; + tmrTrace("%s %p, timer expired, fp:%p, tmr_h:%p, index:%d, total:%d", pCtrl->label, header->param1, header->fp, + header, index, pCtrl->numOfTmrs); + + pList->head = header->next; + if (header->next) header->next->prev = NULL; + pList->count--; + header->timerId = NULL; + + SSchedMsg schedMsg; + schedMsg.fp = NULL; + schedMsg.tfp = header->fp; + schedMsg.ahandle = header->param1; + schedMsg.thandle = header; + taosScheduleTask(tmrQhandle, &schedMsg); + + tmrMemPoolFree(pCtrl->poolHandle, (char *)header); + } + + pCtrl->periodsFromStart++; + pthread_mutex_unlock(&pCtrl->mutex); +} + +void taosTmrCleanUp(void *handle) { + tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; + if (pCtrl == NULL || pCtrl->signature != pCtrl) return; + + pCtrl->signature = NULL; + taosFreeId(tmrIdPool, 
pCtrl->tmrCtrlId); + numOfTmrCtrl--; + tmrTrace("%s is cleaned up, numOfTmrs:%d", pCtrl->label, numOfTmrCtrl); +} + +tmr_h taosTmrStart(void (*fp)(void *, void *), int mseconds, void *param1, void *handle) { + tmr_obj_t * pObj, *cNode, *pNode; + tmr_list_t *pList; + int index, period; + tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; + + if (handle == NULL) return NULL; + + period = mseconds / pCtrl->resolution; + if (pthread_mutex_lock(&pCtrl->mutex) != 0) + tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); + + pObj = (tmr_obj_t *)tmrMemPoolMalloc(pCtrl->poolHandle); + if (pObj == NULL) { + tmrError("%s reach max number of timers:%d", pCtrl->label, pCtrl->maxNumOfTmrs); + pthread_mutex_unlock(&pCtrl->mutex); + return NULL; + } + + pObj->cycle = period / pCtrl->numOfPeriods; + pObj->param1 = param1; + pObj->fp = fp; + pObj->timerId = pObj; + pObj->pCtrl = pCtrl; + + index = (period + pCtrl->periodsFromStart) % pCtrl->numOfPeriods; + int cindex = (pCtrl->periodsFromStart) % pCtrl->numOfPeriods; + pList = &(pCtrl->tmrList[index]); + + pObj->index = index; + cNode = pList->head; + pNode = NULL; + + while (cNode != NULL) { + if (cNode->cycle < pObj->cycle) { + pNode = cNode; + cNode = cNode->next; + } else { + break; + } + } + + pObj->next = cNode; + pObj->prev = pNode; + + if (cNode != NULL) { + cNode->prev = pObj; + } + + if (pNode != NULL) { + pNode->next = pObj; + } else { + pList->head = pObj; + } + + pList->count++; + pCtrl->numOfTmrs++; + + if (pthread_mutex_unlock(&pCtrl->mutex) != 0) + tmrError("%s mutex unlock failed, reason:%s", pCtrl->label, strerror(errno)); + + tmrTrace("%s %p, timer started, fp:%p, tmr_h:%p, index:%d, total:%d cindex:%d", pCtrl->label, param1, fp, pObj, index, + pCtrl->numOfTmrs, cindex); + + return (tmr_h)pObj; +} + +void taosTmrStop(tmr_h timerId) { + tmr_obj_t * pObj; + tmr_list_t *pList; + tmr_ctrl_t *pCtrl; + + pObj = (tmr_obj_t *)timerId; + if (pObj == NULL) return; + + pCtrl = pObj->pCtrl; + if (pCtrl == NULL) return; + + if (pthread_mutex_lock(&pCtrl->mutex) != 0) + tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); + + if (pObj->timerId == timerId) { + pList = &(pCtrl->tmrList[pObj->index]); + if (pObj->prev) { + pObj->prev->next = pObj->next; + } else { + pList->head = pObj->next; + } + + if (pObj->next) { + pObj->next->prev = pObj->prev; + } + + pList->count--; + pObj->timerId = NULL; + pCtrl->numOfTmrs--; + + tmrTrace("%s %p, timer stopped, fp:%p, tmr_h:%p, total:%d", pCtrl->label, pObj->param1, pObj->fp, pObj, + pCtrl->numOfTmrs); + tmrMemPoolFree(pCtrl->poolHandle, (char *)(pObj)); + } + + pthread_mutex_unlock(&pCtrl->mutex); +} + +void taosTmrStopA(tmr_h *timerId) { + tmr_obj_t * pObj; + tmr_list_t *pList; + tmr_ctrl_t *pCtrl; + + pObj = *(tmr_obj_t **)timerId; + if (pObj == NULL) return; + + pCtrl = pObj->pCtrl; + if (pCtrl == NULL) return; + + if (pthread_mutex_lock(&pCtrl->mutex) != 0) + tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); + + if (pObj->timerId == pObj) { + pList = &(pCtrl->tmrList[pObj->index]); + if (pObj->prev) { + pObj->prev->next = pObj->next; + } else { + pList->head = pObj->next; + } + + if (pObj->next) { + pObj->next->prev = pObj->prev; + } + + pList->count--; + pObj->timerId = NULL; + pCtrl->numOfTmrs--; + + tmrTrace("%s %p, timer stopped atomiclly, fp:%p, tmr_h:%p, total:%d", pCtrl->label, pObj->param1, pObj->fp, pObj, + pCtrl->numOfTmrs); + tmrMemPoolFree(pCtrl->poolHandle, (char *)(pObj)); + + *(tmr_obj_t **)timerId = NULL; + } else { + tmrTrace("%s 
%p, timer stopped atomiclly, fp:%p, tmr_h:%p, total:%d", pCtrl->label, pObj->param1, pObj->fp, pObj, + pCtrl->numOfTmrs); + } + + pthread_mutex_unlock(&pCtrl->mutex); +} + +void taosTmrReset(void (*fp)(void *, void *), int mseconds, void *param1, void *handle, tmr_h *pTmrId) { + tmr_obj_t * pObj, *cNode, *pNode; + tmr_list_t *pList; + int index, period; + tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; + + if (handle == NULL) return; + if (pTmrId == NULL) return; + + period = mseconds / pCtrl->resolution; + if (pthread_mutex_lock(&pCtrl->mutex) != 0) + tmrError("%s mutex lock failed, reason:%s", pCtrl->label, strerror(errno)); + + pObj = (tmr_obj_t *)(*pTmrId); + + if (pObj && pObj->timerId == *pTmrId) { + // exist, stop it first + pList = &(pCtrl->tmrList[pObj->index]); + if (pObj->prev) { + pObj->prev->next = pObj->next; + } else { + pList->head = pObj->next; + } + + if (pObj->next) { + pObj->next->prev = pObj->prev; + } + + pList->count--; + pObj->timerId = NULL; + pCtrl->numOfTmrs--; + } else { + // timer not there, or already expired + pObj = (tmr_obj_t *)tmrMemPoolMalloc(pCtrl->poolHandle); + *pTmrId = pObj; + + if (pObj == NULL) { + tmrError("%s failed to allocate timer, max:%d allocated:%d", pCtrl->label, pCtrl->maxNumOfTmrs, pCtrl->numOfTmrs); + pthread_mutex_unlock(&pCtrl->mutex); + return; + } + } + + pObj->cycle = period / pCtrl->numOfPeriods; + pObj->param1 = param1; + pObj->fp = fp; + pObj->timerId = pObj; + pObj->pCtrl = pCtrl; + + index = (period + pCtrl->periodsFromStart) % pCtrl->numOfPeriods; + pList = &(pCtrl->tmrList[index]); + + pObj->index = index; + cNode = pList->head; + pNode = NULL; + + while (cNode != NULL) { + if (cNode->cycle < pObj->cycle) { + pNode = cNode; + cNode = cNode->next; + } else { + break; + } + } + + pObj->next = cNode; + pObj->prev = pNode; + + if (cNode != NULL) { + cNode->prev = pObj; + } + + if (pNode != NULL) { + pNode->next = pObj; + } else { + pList->head = pObj; + } + + pList->count++; + pCtrl->numOfTmrs++; + + if (pthread_mutex_unlock(&pCtrl->mutex) != 0) + tmrError("%s mutex unlock failed, reason:%s", pCtrl->label, strerror(errno)); + + tmrTrace("%s %p, timer is reset, fp:%p, tmr_h:%p, index:%d, total:%d numOfFree:%d", pCtrl->label, param1, fp, pObj, + index, pCtrl->numOfTmrs, ((pool_t *)pCtrl->poolHandle)->numOfFree); + + return; +} + +void taosTmrList(void *handle) { + int i; + tmr_list_t *pList; + tmr_obj_t * pObj; + tmr_ctrl_t *pCtrl = (tmr_ctrl_t *)handle; + + for (i = 0; i < pCtrl->numOfPeriods; ++i) { + pList = &(pCtrl->tmrList[i]); + pObj = pList->head; + if (!pObj) continue; + printf("\nindex=%d count:%d\n", i, pList->count); + while (pObj) { + pObj = pObj->next; + } + } +} + +mpool_h tmrMemPoolInit(int numOfBlock, int blockSize) { + int i; + pool_t *pool_p; + + if (numOfBlock <= 1 || blockSize <= 1) { + tmrError("invalid parameter in memPoolInit\n"); + return NULL; + } + + pool_p = (pool_t *)malloc(sizeof(pool_t)); + if (pool_p == NULL) { + tmrError("mempool malloc failed\n"); + return NULL; + } else { + memset(pool_p, 0, sizeof(pool_t)); + } + + pool_p->blockSize = blockSize; + pool_p->numOfBlock = numOfBlock; + pool_p->pool = (char *)malloc(blockSize * numOfBlock); + memset(pool_p->pool, 0, blockSize * numOfBlock); + pool_p->freeList = (int *)malloc(sizeof(int) * numOfBlock); + + if (pool_p->pool == NULL || pool_p->freeList == NULL) { + tmrError("failed to allocate memory\n"); + free(pool_p->freeList); + free(pool_p->pool); + free(pool_p); + return NULL; + } + + for (i = 0; i < pool_p->numOfBlock; ++i) pool_p->freeList[i] = i; + + 
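+  // freeList now holds the block indices 0..numOfBlock-1 in order; `first`
+  // points at the next index to hand out, and the list is consumed/refilled
+  // in FIFO fashion by tmrMemPoolMalloc()/tmrMemPoolFree() below.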
pool_p->first = 0; + pool_p->numOfFree = pool_p->numOfBlock; + + return (mpool_h)pool_p; +} + +char *tmrMemPoolMalloc(mpool_h handle) { + char * pos = NULL; + pool_t *pool_p = (pool_t *)handle; + + if (pool_p->numOfFree <= 0 || pool_p->numOfFree > pool_p->numOfBlock) { + tmrError("mempool: out of memory, numOfFree:%d, numOfBlock:%d", pool_p->numOfFree, pool_p->numOfBlock); + } else { + pos = pool_p->pool + pool_p->blockSize * (pool_p->freeList[pool_p->first]); + pool_p->first++; + pool_p->first = pool_p->first % pool_p->numOfBlock; + pool_p->numOfFree--; + } + + return pos; +} + +void tmrMemPoolFree(mpool_h handle, char *pMem) { + int index; + pool_t *pool_p = (pool_t *)handle; + + if (pMem == NULL) return; + + index = (int)(pMem - pool_p->pool) / pool_p->blockSize; + + if (index < 0 || index >= pool_p->numOfBlock) { + tmrError("tmr mempool: error, invalid address:%p\n", pMem); + } else { + memset(pMem, 0, pool_p->blockSize); + pool_p->freeList[(pool_p->first + pool_p->numOfFree) % pool_p->numOfBlock] = index; + pool_p->numOfFree++; + } +} + +void tmrMemPoolCleanUp(mpool_h handle) { + pool_t *pool_p = (pool_t *)handle; + if (pool_p == NULL) return; + + if (pool_p->pool) free(pool_p->pool); + if (pool_p->freeList) free(pool_p->freeList); + memset(&pool_p, 0, sizeof(pool_p)); + free(pool_p); +} diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c new file mode 100644 index 000000000000..ad8dae36a45a --- /dev/null +++ b/src/util/src/ttokenizer.c @@ -0,0 +1,534 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
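A minimal usage sketch of the fixed-size pool API above (tmrMemPoolInit/Malloc/Free/CleanUp); the block count and block size here are illustrative, not values taken from the timer module:

/* sketch: a pool of 1024 blocks, 64 bytes each */
mpool_h pool = tmrMemPoolInit(1024, 64);
if (pool != NULL) {
  char *blk = tmrMemPoolMalloc(pool);  /* NULL once all blocks are in use */
  if (blk != NULL) {
    /* blk points to a zero-filled, fixed-size block */
    tmrMemPoolFree(pool, blk);         /* zeroes the block and returns it to the queue */
  }
  tmrMemPoolCleanUp(pool);             /* releases the pool buffer and its free list */
}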
+ */ + +#include +#include +#include +#include + +#include "shash.h" +#include "tsql.h" +#include "tutil.h" + +//All the keywords of the SQL language are stored in a hash table +typedef struct SKeyword { + const char* name; // The keyword name + uint8_t type; // type + uint8_t len; // length +} SKeyword; + +static SKeyword keywordTable[] = { + {"ID", TK_ID}, + {"BOOL", TK_BOOL}, + {"TINYINT", TK_TINYINT}, + {"SMALLINT", TK_SMALLINT}, + {"INTEGER", TK_INTEGER}, + {"INT", TK_INTEGER}, + {"BIGINT", TK_BIGINT}, + {"FLOAT", TK_FLOAT}, + {"DOUBLE", TK_DOUBLE}, + {"STRING", TK_STRING}, + {"TIMESTAMP", TK_TIMESTAMP}, + {"BINARY", TK_BINARY}, + {"NCHAR", TK_NCHAR}, + {"OR", TK_OR}, + {"AND", TK_AND}, + {"NOT", TK_NOT}, + {"EQ", TK_EQ}, + {"NE", TK_NE}, + {"ISNULL", TK_ISNULL}, + {"NOTNULL", TK_NOTNULL}, + {"IS", TK_IS}, + {"LIKE", TK_LIKE}, + {"GLOB", TK_GLOB}, + {"BETWEEN", TK_BETWEEN}, + {"IN", TK_IN}, + {"GT", TK_GT}, + {"GE", TK_GE}, + {"LT", TK_LT}, + {"LE", TK_LE}, + {"BITAND", TK_BITAND}, + {"BITOR", TK_BITOR}, + {"LSHIFT", TK_LSHIFT}, + {"RSHIFT", TK_RSHIFT}, + {"PLUS", TK_PLUS}, + {"MINUS", TK_MINUS}, + {"DIVIDE", TK_DIVIDE}, + {"TIMES", TK_TIMES}, + {"STAR", TK_STAR}, + {"SLASH", TK_SLASH}, + {"REM ", TK_REM}, + {"CONCAT", TK_CONCAT}, + {"UMINUS", TK_UMINUS}, + {"UPLUS", TK_UPLUS}, + {"BITNOT", TK_BITNOT}, + {"SHOW", TK_SHOW}, + {"DATABASES", TK_DATABASES}, + {"MNODES", TK_MNODES}, + {"DNODES", TK_DNODES}, + {"USERS", TK_USERS}, + {"MODULES", TK_MODULES}, + {"QUERIES", TK_QUERIES}, + {"CONNECTIONS", TK_CONNECTIONS}, + {"STREAMS", TK_STREAMS}, + {"CONFIGS", TK_CONFIGS}, + {"SCORES", TK_SCORES}, + {"GRANTS", TK_GRANTS}, + {"DOT", TK_DOT}, + {"TABLES", TK_TABLES}, + {"STABLES", TK_STABLES}, + {"VGROUPS", TK_VGROUPS}, + {"DROP", TK_DROP}, + {"TABLE", TK_TABLE}, + {"DATABASE", TK_DATABASE}, + {"USER", TK_USER}, + {"USE", TK_USE}, + {"DESCRIBE", TK_DESCRIBE}, + {"ALTER", TK_ALTER}, + {"PASS", TK_PASS}, + {"PRIVILEGE", TK_PRIVILEGE}, + {"DNODE", TK_DNODE}, + {"IP", TK_IP}, + {"LOCAL", TK_LOCAL}, + {"IF", TK_IF}, + {"EXISTS", TK_EXISTS}, + {"CREATE", TK_CREATE}, + {"KEEP", TK_KEEP}, + {"REPLICA", TK_REPLICA}, + {"DAYS", TK_DAYS}, + {"ROWS", TK_ROWS}, + {"CACHE", TK_CACHE}, + {"ABLOCKS", TK_ABLOCKS}, + {"TBLOCKS", TK_TBLOCKS}, + {"CTIME", TK_CTIME}, + {"CLOG", TK_CLOG}, + {"COMP", TK_COMP}, + {"PRECISION", TK_PRECISION}, + {"LP", TK_LP}, + {"RP", TK_RP}, + {"TAGS", TK_TAGS}, + {"USING", TK_USING}, + {"AS", TK_AS}, + {"COMMA", TK_COMMA}, + {"NULL", TK_NULL}, + {"SELECT", TK_SELECT}, + {"FROM", TK_FROM}, + {"VARIABLE", TK_VARIABLE}, + {"INTERVAL", TK_INTERVAL}, + {"FILL", TK_FILL}, + {"SLIDING", TK_SLIDING}, + {"ORDER", TK_ORDER}, + {"BY", TK_BY}, + {"ASC", TK_ASC}, + {"DESC", TK_DESC}, + {"GROUP", TK_GROUP}, + {"HAVING", TK_HAVING}, + {"LIMIT", TK_LIMIT}, + {"OFFSET", TK_OFFSET}, + {"SLIMIT", TK_SLIMIT}, + {"SOFFSET", TK_SOFFSET}, + {"WHERE", TK_WHERE}, + {"NOW", TK_NOW}, + {"INSERT", TK_INSERT}, + {"INTO", TK_INTO}, + {"VALUES", TK_VALUES}, + {"RESET", TK_RESET}, + {"QUERY", TK_QUERY}, + {"ADD", TK_ADD}, + {"COLUMN", TK_COLUMN}, + {"TAG", TK_TAG}, + {"CHANGE", TK_CHANGE}, + {"SET", TK_SET}, + {"KILL", TK_KILL}, + {"CONNECTION", TK_CONNECTION}, + {"COLON", TK_COLON}, + {"STREAM", TK_STREAM}, + {"ABORT", TK_ABORT}, + {"AFTER", TK_AFTER}, + {"ATTACH", TK_ATTACH}, + {"BEFORE", TK_BEFORE}, + {"BEGIN", TK_BEGIN}, + {"CASCADE", TK_CASCADE}, + {"CLUSTER", TK_CLUSTER}, + {"CONFLICT", TK_CONFLICT}, + {"COPY", TK_COPY}, + {"DEFERRED", TK_DEFERRED}, + {"DELIMITERS", TK_DELIMITERS}, + {"DETACH", TK_DETACH}, + 
{"EACH", TK_EACH}, + {"END", TK_END}, + {"EXPLAIN", TK_EXPLAIN}, + {"FAIL", TK_FAIL}, + {"FOR", TK_FOR}, + {"IGNORE", TK_IGNORE}, + {"IMMEDIATE", TK_IMMEDIATE}, + {"INITIALLY", TK_INITIALLY}, + {"INSTEAD", TK_INSTEAD}, + {"MATCH", TK_MATCH}, + {"KEY", TK_KEY}, + {"OF", TK_OF}, + {"RAISE", TK_RAISE}, + {"REPLACE", TK_REPLACE}, + {"RESTRICT", TK_RESTRICT}, + {"ROW", TK_ROW}, + {"STATEMENT", TK_STATEMENT}, + {"TRIGGER", TK_TRIGGER}, + {"VIEW", TK_VIEW}, + {"ALL", TK_ALL}, + {"COUNT", TK_COUNT}, + {"SUM", TK_SUM}, + {"AVG", TK_AVG}, + {"MIN", TK_MIN}, + {"MAX", TK_MAX}, + {"FIRST", TK_FIRST}, + {"LAST", TK_LAST}, + {"TOP", TK_TOP}, + {"BOTTOM", TK_BOTTOM}, + {"STDDEV", TK_STDDEV}, + {"PERCENTILE", TK_PERCENTILE}, + {"APERCENTILE", TK_APERCENTILE}, + {"LEASTSQUARES",TK_LEASTSQUARES}, + {"HISTOGRAM", TK_HISTOGRAM}, + {"DIFF", TK_DIFF}, + {"SPREAD", TK_SPREAD}, + {"WAVG", TK_WAVG}, + {"INTERP", TK_INTERP}, + {"LAST_ROW", TK_LAST_ROW}, + {"SEMI", TK_SEMI,}, + {"NONE", TK_NONE,}, + {"PREV", TK_PREV,}, + {"LINEAR", TK_LINEAR,}, + {"IMPORT", TK_IMPORT}, + {"METRIC", TK_METRIC}, + {"TBNAME", TK_TBNAME}, + {"JOIN", TK_JOIN}, + {"METRICS", TK_METRICS}, + {"STABLE", TK_STABLE} +}; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +static const char isIdChar[] = { + /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ +}; + +static void* KeywordHashTable = NULL; +int tSQLKeywordCode(const char* z, int n) { + int i; + static char needInit = 1; + if (needInit) { + /* Initialize the keyword hash table */ + pthread_mutex_lock(&mutex); + + // double check + if (needInit) { + int nk = tListLen(keywordTable); + + KeywordHashTable = taosInitStrHash(nk, POINTER_BYTES, taosHashStringStep1); + for (i = 0; i < nk; i++) { + keywordTable[i].len = strlen(keywordTable[i].name); + void* ptr = &keywordTable[i]; + taosAddStrHash(KeywordHashTable, (char*)keywordTable[i].name, (void*)&ptr); + } + needInit = 0; + } + pthread_mutex_unlock(&mutex); + } + + char key[128] = {0}; + for (int32_t j = 0; j < n; ++j) { + if (z[j] >= 'a' && z[j] <= 'z') { + key[j] = (char)(z[j] & 0xDF); // touppercase and set the null-terminated + } else { + key[j] = z[j]; + } + } + + SKeyword** pKey = (SKeyword**)taosGetStrHashData(KeywordHashTable, key); + if (pKey != NULL) { + return (*pKey)->type; + } else { + return TK_ID; + } +} + +uint32_t tSQLGetToken(char* z, uint32_t* tokenType) { + int i; + switch (*z) { + case ' ': + case '\t': + case '\n': + case '\f': + case '\r': { + for (i = 1; isspace(z[i]); i++) { + } + *tokenType = TK_SPACE; + return i; + } + case ':': { + *tokenType = TK_COLON; + return 1; + } + case '-': { + if (z[1] == '-') { + for (i = 2; z[i] && z[i] != '\n'; i++) { + } + *tokenType = TK_COMMENT; + return i; + } + *tokenType = TK_MINUS; + return 1; + } + case '(': { + *tokenType = TK_LP; + return 1; + } + case ')': { + *tokenType = TK_RP; + return 1; + } + case ';': { + *tokenType = TK_SEMI; + return 1; + } + case '+': { + *tokenType = TK_PLUS; + return 1; + } + case '*': { + *tokenType = TK_STAR; + return 1; + } + case '/': { + if (z[1] != '*' || z[2] 
== 0) { + *tokenType = TK_SLASH; + return 1; + } + for (i = 3; z[i] && (z[i] != '/' || z[i - 1] != '*'); i++) { + } + if (z[i]) i++; + *tokenType = TK_COMMENT; + return i; + } + case '%': { + *tokenType = TK_REM; + return 1; + } + case '=': { + *tokenType = TK_EQ; + return 1 + (z[1] == '='); + } + case '<': { + if (z[1] == '=') { + *tokenType = TK_LE; + return 2; + } else if (z[1] == '>') { + *tokenType = TK_NE; + return 2; + } else if (z[1] == '<') { + *tokenType = TK_LSHIFT; + return 2; + } else { + *tokenType = TK_LT; + return 1; + } + } + case '>': { + if (z[1] == '=') { + *tokenType = TK_GE; + return 2; + } else if (z[1] == '>') { + *tokenType = TK_RSHIFT; + return 2; + } else { + *tokenType = TK_GT; + return 1; + } + } + case '!': { + if (z[1] != '=') { + *tokenType = TK_ILLEGAL; + return 2; + } else { + *tokenType = TK_NE; + return 2; + } + } + case '|': { + if (z[1] != '|') { + *tokenType = TK_BITOR; + return 1; + } else { + *tokenType = TK_CONCAT; + return 2; + } + } + case ',': { + *tokenType = TK_COMMA; + return 1; + } + case '&': { + *tokenType = TK_BITAND; + return 1; + } + case '~': { + *tokenType = TK_BITNOT; + return 1; + } + case '\'': + case '"': { + int delim = z[0]; + for (i = 1; z[i]; i++) { + if (z[i] == delim) { + if (z[i + 1] == delim) { + i++; + } else { + break; + } + } + } + if (z[i]) i++; + *tokenType = TK_STRING; + return i; + } + case '.': { + *tokenType = TK_DOT; + return 1; + } + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': { + *tokenType = TK_INTEGER; + for (i = 1; isdigit(z[i]); i++) { + } + + /* here is the 1a/2s/3m/9y */ + if ((z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' || z[i] == 'y' || + z[i] == 'w' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' || + z[i] == 'Y' || z[i] == 'W') && + (isIdChar[z[i + 1]] == 0)) { + *tokenType = TK_VARIABLE; + i += 1; + return i; + } + + int32_t seg = 1; + while (z[i] == '.' 
&& isdigit(z[i + 1])) { + i += 2; + while (isdigit(z[i])) { + i++; + } + *tokenType = TK_FLOAT; + seg++; + } + + if (seg == 4) { // ip address + *tokenType = TK_IP; + return i; + } + + if ((z[i] == 'e' || z[i] == 'E') && + (isdigit(z[i + 1]) || ((z[i + 1] == '+' || z[i + 1] == '-') && isdigit(z[i + 2])))) { + i += 2; + while (isdigit(z[i])) { + i++; + } + *tokenType = TK_FLOAT; + } + return i; + } + case '[': { + for (i = 1; z[i] && z[i - 1] != ']'; i++) { + } + *tokenType = TK_ID; + return i; + } + case 'T': + case 't': + case 'F': + case 'f': { + for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[z[i]]; i++) { + } + + if ((i == 4 && strncasecmp(z, "true", 4) == 0) || (i == 5 && strncasecmp(z, "false", 5) == 0)) { + *tokenType = TK_BOOL; + return i; + } + } + default: { + if (((*z & 0x80) != 0) || !isIdChar[*z]) { + break; + } + for (i = 1; ((z[i] & 0x80) == 0) && isIdChar[z[i]]; i++) { + } + *tokenType = tSQLKeywordCode(z, i); + return i; + } + } + + *tokenType = TK_ILLEGAL; + return 0; +} + +void tStrGetToken(char* str, int32_t* i, SSQLToken* t0, bool isPrevOptr) { + // here we reach the end of sql string, null-terminated string + if (str[*i] == 0) { + t0->n = 0; + return; + } + + t0->n = tSQLGetToken(&str[*i], &t0->type); + + /* IGNORE all space between valid tokens */ + while (t0->type == TK_SPACE) { + *i += t0->n; + t0->n = tSQLGetToken(&str[*i], &t0->type); + } + + /* support parse the -/+number format */ + if ((isPrevOptr) && (t0->type == TK_MINUS || t0->type == TK_PLUS)) { + uint32_t type = 0; + int32_t len = tSQLGetToken(&str[*i + t0->n], &type); + if (type == TK_INTEGER || type == TK_FLOAT) { + t0->type = type; + t0->n += len; + } + } + + t0->z = str + (*i); + *i += t0->n; +} + +bool isKeyWord(const char* z, int32_t len) { + int32_t tokenType = tSQLKeywordCode((char*)z, len); + return (tokenType != TK_ID); +} + +bool isNumber(const SSQLToken* pToken) { return (pToken->type == TK_INTEGER || pToken->type == TK_FLOAT); } diff --git a/src/util/src/ttypes.c b/src/util/src/ttypes.c new file mode 100644 index 000000000000..747019e0440a --- /dev/null +++ b/src/util/src/ttypes.c @@ -0,0 +1,857 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
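A minimal sketch of driving the scanner above over a SQL string, assuming the TK_* constants from tsql.h and <stdio.h>; the statement text is made up:

char     sql[] = "select speed from m1 where speed > 10";
uint32_t pos = 0;
while (sql[pos] != 0) {
  uint32_t type = 0;
  uint32_t len = tSQLGetToken(sql + pos, &type);
  if (len == 0) break;                        /* TK_ILLEGAL: nothing consumed */
  if (type != TK_SPACE && type != TK_COMMENT) {
    printf("type:%u len:%u text:%.*s\n", type, len, (int)len, sql + pos);
  }
  pos += len;
}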
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "taos.h" +#include "tsdb.h" +#include "tsqldef.h" +#include "ttypes.h" +#include "tutil.h" + +tDataDescriptor tDataTypeDesc[11] = { + {TSDB_DATA_TYPE_NULL, 6, 1, "NOTYPE"}, + {TSDB_DATA_TYPE_BOOL, 4, CHAR_BYTES, "BOOL"}, + {TSDB_DATA_TYPE_TINYINT, 7, CHAR_BYTES, "TINYINT"}, + {TSDB_DATA_TYPE_SMALLINT, 8, SHORT_BYTES, "SMALLINT"}, + {TSDB_DATA_TYPE_INT, 3, INT_BYTES, "INT"}, + {TSDB_DATA_TYPE_BIGINT, 6, LONG_BYTES, "BIGINT"}, + {TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT"}, + {TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE"}, + {TSDB_DATA_TYPE_BINARY, 6, 0, "BINARY"}, + {TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP"}, + {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR"}, +}; + +char tTokenTypeSwitcher[] = { + TSDB_DATA_TYPE_NULL, // no type + TSDB_DATA_TYPE_BINARY, // TK_ID + TSDB_DATA_TYPE_BOOL, // TK_BOOL + TSDB_DATA_TYPE_BIGINT, // TK_TINYINT + TSDB_DATA_TYPE_BIGINT, // TK_SMALLINT + TSDB_DATA_TYPE_BIGINT, // TK_INTEGER + TSDB_DATA_TYPE_BIGINT, // TK_BIGINT + TSDB_DATA_TYPE_DOUBLE, // TK_FLOAT + TSDB_DATA_TYPE_DOUBLE, // TK_DOUBLE + TSDB_DATA_TYPE_BINARY, // TK_STRING + TSDB_DATA_TYPE_BIGINT, // TK_TIMESTAMP + TSDB_DATA_TYPE_BINARY, // TK_BINARY + TSDB_DATA_TYPE_NCHAR, // TK_NCHAR +}; + +bool isValidDataType(int32_t type, int32_t length) { + if (type < TSDB_DATA_TYPE_BOOL || type > TSDB_DATA_TYPE_NCHAR) { + return false; + } + + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + return length >= 0 && length <= TSDB_MAX_BINARY_LEN; + } + + return true; +} + +// todo support scientific expression number and oct number +void tVariantCreate(tVariant *pVar, SSQLToken *token) { tVariantCreateN(pVar, token->z, token->n, token->type); } + +void tVariantCreateN(tVariant *pVar, char *pz, uint32_t len, uint32_t type) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: { + int32_t k = strncasecmp(pz, "true", 4); + if (k == 0) { + pVar->i64Key = TSDB_TRUE; + } else { + assert(strncasecmp(pz, "false", 5) == 0); + pVar->i64Key = TSDB_FALSE; + } + break; + } + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_INT: + pVar->i64Key = strtoll(pz, NULL, 10); + break; + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_FLOAT: + pVar->dKey = strtod(pz, NULL); + break; + case TSDB_DATA_TYPE_BINARY: { + pVar->pz = malloc(len + 1); + strncpy(pVar->pz, pz, len); + pVar->nLen = len; + pVar->pz[len] = 0; + + pVar->nLen = strdequote(pVar->pz); + break; + } + default: + assert(false); + } + + pVar->nType = type; +} + +/** + * create tVariant from binary string, not ascii data + * @param pVar + * @param pz + * @param len + * @param type + */ +void tVariantCreateB(tVariant *pVar, char *pz, uint32_t len, uint32_t type) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + pVar->i64Key = *(int8_t *)pz; + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + pVar->i64Key = *(int16_t *)pz; + break; + } + case TSDB_DATA_TYPE_INT: { + pVar->i64Key = *(int32_t *)pz; + break; + } + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: { + pVar->i64Key = *(int64_t *)pz; + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + pVar->dKey = *(double *)pz; + break; + } + case TSDB_DATA_TYPE_FLOAT: { + pVar->dKey = *(float *)pz; + break; + } + case TSDB_DATA_TYPE_NCHAR: { + /* here we get the nchar length from raw binary bits length */ + int32_t wlen = len / TSDB_NCHAR_SIZE; + pVar->wpz = malloc((len + 1) * wlen); + + 
wcsncpy(pVar->wpz, (wchar_t *)pz, wlen); + pVar->nLen = wlen; + pVar->wpz[wlen] = 0; + + break; + } + case TSDB_DATA_TYPE_BINARY: { + pVar->pz = malloc(len + 1); + strncpy(pVar->pz, pz, len); + pVar->nLen = len; + pVar->pz[len] = 0; + + strdequote(pVar->pz); + pVar->nLen = strlen(pVar->pz); + + break; + } + } + + pVar->nType = type; +} +void tVariantDestroy(tVariant *pVar) { + if (pVar == NULL) return; + + if ((pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) && pVar->nLen > 0) { + free(pVar->pz); + pVar->pz = NULL; + pVar->nLen = 0; + } +} + +void tVariantAssign(tVariant *pDst, tVariant *pSrc) { + if (pSrc == NULL || pDst == NULL) return; + + *pDst = *pSrc; + + if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) { + int32_t len = pSrc->nLen + 1; + if (pSrc->nType == TSDB_DATA_TYPE_NCHAR) { + len = len * TSDB_NCHAR_SIZE; + } + + pDst->pz = calloc(1, len); + memcpy(pDst->pz, pSrc->pz, len); + } +} + +int32_t tVariantToString(tVariant *pVar, char *dst) { + if (pVar == NULL || dst == NULL) return 0; + + switch (pVar->nType) { + case TSDB_DATA_TYPE_BINARY: { + int32_t len = sprintf(dst, "\'%s\'", pVar->pz); + assert(len <= pVar->nLen + sizeof("\'") * 2); // two more chars + return len; + } + + case TSDB_DATA_TYPE_NCHAR: { + dst[0] = '\''; + taosUcs4ToMbs(pVar->wpz, (wcslen(pVar->wpz) + 1) * TSDB_NCHAR_SIZE, dst + 1); + int32_t len = strlen(dst); + dst[len] = '\''; + dst[len + 1] = 0; + return len + 1; + } + + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_INT: + return sprintf(dst, "%d", (int32_t)pVar->i64Key); + + case TSDB_DATA_TYPE_BIGINT: + return sprintf(dst, "%ld", pVar->i64Key); + + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: + return sprintf(dst, "%.9lf", pVar->dKey); + + default: + return 0; + } +} + +static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type, bool releaseVariantPtr) { + if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { + *((int64_t *)pDest) = pVariant->i64Key; + + } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { + if ((pVariant->dKey < INT64_MIN) || (pVariant->dKey > INT64_MAX)) { + return -1; + } + + *((int64_t *)pDest) = (int64_t)pVariant->dKey; + } else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { + errno = 0; + char *endPtr = NULL; + + SSQLToken token = {0}; + token.n = tSQLGetToken(pVariant->pz, &token.type); + + if (token.type == TK_MINUS || token.type == TK_PLUS) { + token.n = tSQLGetToken(pVariant->pz + token.n, &token.type); + } + + if (token.type == TK_FLOAT) { + double v = strtod(pVariant->pz, &endPtr); + if (releaseVariantPtr) { + free(pVariant->pz); + pVariant->nLen = 0; + } + + if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) { + return -1; + } + + if ((v < INT64_MIN) || (v > INT64_MAX)) { + return -1; + } + + *((int64_t *)pDest) = (int64_t)v; + } else if (token.type == TK_INTEGER) { + int64_t val = strtoll(pVariant->pz, &endPtr, 10); + if (releaseVariantPtr) { + free(pVariant->pz); + pVariant->nLen = 0; + } + + if (errno == ERANGE) { + return -1; // data overflow + } + + *((int64_t *)pDest) = val; + } else if (strncasecmp(TSDB_DATA_NULL_STR_L, pVariant->pz, pVariant->nLen) == 0 && + strlen(TSDB_DATA_NULL_STR_L) == pVariant->nLen) { + setNull(pDest, type, tDataTypeDesc[type].nSize); + } else { + return -1; + } + + } else if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) { + errno = 0; + wchar_t *endPtr = 
NULL; + + SSQLToken token = {0}; + token.n = tSQLGetToken(pVariant->pz, &token.type); + + if (token.type == TK_MINUS || token.type == TK_PLUS) { + token.n = tSQLGetToken(pVariant->pz + token.n, &token.type); + } + + if (token.type == TK_FLOAT) { + double v = wcstod(pVariant->wpz, &endPtr); + if (releaseVariantPtr) { + free(pVariant->pz); + pVariant->nLen = 0; + } + + if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) { + return -1; + } + + if ((v < INT64_MIN) || (v > INT64_MAX)) { + return -1; + } + + *((int64_t *)pDest) = (int64_t)v; + } else { + int64_t val = wcstoll(pVariant->wpz, &endPtr, 10); + if (releaseVariantPtr) { + free(pVariant->pz); + pVariant->nLen = 0; + } + + if (errno == ERANGE) { + return -1; // data overflow + } + + *((int64_t *)pDest) = val; + } + } + + return 0; +} + +static FORCE_INLINE int32_t convertToBoolImpl(char *pStr, int32_t len) { + if ((strncasecmp(pStr, "true", len) == 0) && (len == 4)) { + return TSDB_TRUE; + } else if ((strncasecmp(pStr, "false", len) == 0) && (len == 5)) { + return TSDB_FALSE; + } else if (strcasecmp(pStr, TSDB_DATA_NULL_STR_L) == 0) { + return TSDB_DATA_BOOL_NULL; + } else { + return -1; + } +} + +static FORCE_INLINE int32_t wcsconvertToBoolImpl(wchar_t *pstr, int32_t len) { + if ((wcsncasecmp(pstr, L"true", len) == 0) && (len == 4)) { + return TSDB_TRUE; + } else if (wcsncasecmp(pstr, L"false", len) == 0 && (len == 5)) { + return TSDB_FALSE; + } else { + return -1; + } +} + +static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) { + const int32_t INITIAL_ALLOC_SIZE = 20; + char * pBuf = NULL; + + if (*pDest == pVariant->pz) { + pBuf = calloc(1, INITIAL_ALLOC_SIZE); + } + + if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) { + size_t newSize = pVariant->nLen * TSDB_NCHAR_SIZE; + if (pBuf != NULL) { + if (newSize > INITIAL_ALLOC_SIZE) { + pBuf = realloc(pBuf, newSize + 1); + } + + taosUcs4ToMbs(pVariant->wpz, newSize, pBuf); + free(pVariant->wpz); + + /* terminated string */ + pBuf[newSize] = 0; + } else { + taosUcs4ToMbs(pVariant->wpz, newSize, *pDest); + } + + } else { + if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { + sprintf(pBuf == NULL ? *pDest : pBuf, "%ld", pVariant->i64Key); + } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { + sprintf(pBuf == NULL ? *pDest : pBuf, "%lf", pVariant->dKey); + } else if (pVariant->nType == TSDB_DATA_TYPE_BOOL) { + sprintf(pBuf == NULL ? *pDest : pBuf, "%s", (pVariant->i64Key == TSDB_TRUE) ? "TRUE" : "FALSE"); + } + } + + if (pBuf != NULL) { + *pDest = pBuf; + } + *pDestSize = strlen(*pDest); + return 0; +} + +static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { + char tmpBuf[40] = {0}; + + char * pDst = tmpBuf; + int32_t nLen = 0; + + if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { + nLen = sprintf(pDst, "%ld", pVariant->i64Key); + } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { + nLen = sprintf(pDst, "%lf", pVariant->dKey); + } else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { + pDst = pVariant->pz; + nLen = pVariant->nLen; + } else if (pVariant->nType == TSDB_DATA_TYPE_BOOL) { + nLen = sprintf(pDst, "%s", (pVariant->i64Key == TSDB_TRUE) ? 
"TRUE" : "FALSE"); + } + + if (*pDest == pVariant->pz) { + wchar_t *pWStr = calloc(1, (nLen + 1) * TSDB_NCHAR_SIZE); + taosMbsToUcs4(pDst, nLen, (char *)pWStr, (nLen + 1) * TSDB_NCHAR_SIZE); + + if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { + free(pVariant->wpz); + } + pVariant->wpz = pWStr; + + *pDestSize = wcslen(pVariant->wpz); + } else { + taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE); + } + + return 0; +} + +static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result, int32_t type, int64_t lowBnd, + int64_t upperBnd) { + if (doConvertToInteger(pVariant, (char *)result, type, false) != 0) { + return -1; + } + + if (isNull((char *)result, type)) { + return 0; + } + + if (*result < lowBnd || *result > upperBnd) { + return -1; + } + + return 0; +} + +static int32_t convertToBool(tVariant *pVariant, int64_t *pDest) { + if (pVariant->nType == TSDB_DATA_TYPE_BOOL) { + *pDest = pVariant->i64Key; // in order to be compatible to null of bool + } else if (pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { + *pDest = ((pVariant->i64Key != 0) ? TSDB_TRUE : TSDB_FALSE); + } else if (pVariant->nType == TSDB_DATA_TYPE_FLOAT || pVariant->nType == TSDB_DATA_TYPE_DOUBLE) { + *pDest = ((pVariant->dKey != 0) ? TSDB_TRUE : TSDB_FALSE); + } else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { + int32_t ret = 0; + if ((ret = convertToBoolImpl(pVariant->pz, pVariant->nLen)) < 0) { + return ret; + } + + *pDest = ret; + } else if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) { + int32_t ret = 0; + if ((ret = wcsconvertToBoolImpl(pVariant->wpz, pVariant->nLen)) < 0) { + return ret; + } + + *pDest = ret; + } + + assert(*pDest == TSDB_TRUE || *pDest == TSDB_FALSE || *pDest == TSDB_DATA_BOOL_NULL); + return 0; +} + +/* + * transfer data from variant serve as the implicit data conversion: from input sql string pVariant->nType + * to column type defined in schema + * + * todo handle the return value + */ +int32_t tVariantDump(tVariant *pVariant, char *payload, char type) { + if (pVariant == NULL || !isValidDataType(pVariant->nType, pVariant->nLen)) { // value is not set + return -1; + } + + char *endPtr = NULL; + errno = 0; // reset global error code + + switch (type) { + case TSDB_DATA_TYPE_BOOL: { // bool + int64_t dst = 0; + if (convertToBool(pVariant, &dst) < 0) { + return -1; + } + *(int8_t *)payload = (int8_t)dst; + break; + } + case TSDB_DATA_TYPE_TINYINT: { + int64_t result = 0; + if (convertToInteger(pVariant, &result, type, INT8_MIN, INT8_MAX) < 0) { + return -1; + } + + *((int8_t *)payload) = (int8_t)result; + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { + int64_t result = 0; + if (convertToInteger(pVariant, &result, type, INT16_MIN, INT16_MAX) < 0) { + return -1; + } + + *((int16_t *)payload) = (int16_t)result; + break; + } + + case TSDB_DATA_TYPE_INT: { + int64_t result = 0; + if (convertToInteger(pVariant, &result, type, INT32_MIN, INT32_MAX) < 0) { + return -1; + } + + *((int32_t *)payload) = (int32_t)result; + break; + } + case TSDB_DATA_TYPE_BIGINT: { + int64_t result = 0; + if (doConvertToInteger(pVariant, (char *)&result, type, false) != 0) { + return -1; + } + + *((int64_t *)payload) = (int64_t)result; + break; + } + case TSDB_DATA_TYPE_FLOAT: { + if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { + if (strncasecmp(TSDB_DATA_NULL_STR_L, pVariant->pz, pVariant->nLen) == 0 && + strlen(TSDB_DATA_NULL_STR_L) == pVariant->nLen) { + setNull(payload, type, tDataTypeDesc[type].nSize); + } else { + *((float *)payload) = (float)strtod(pVariant->pz, &endPtr); + if (errno == 
ERANGE && *(float *)payload == -1) { + return -1; + } + + if (isinf(*(float *)payload) || isnan(*(float *)payload)) { + return -1; + } + } + } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { + *((float *)payload) = pVariant->i64Key; + } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { + *((float *)payload) = (float)pVariant->dKey; + + if (isinf(pVariant->dKey) || isnan(pVariant->dKey) || pVariant->dKey > FLT_MAX || pVariant->dKey < -FLT_MAX) { + return -1; + } + } + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { + if (strncasecmp(TSDB_DATA_NULL_STR_L, pVariant->pz, pVariant->nLen) == 0 && + strlen(TSDB_DATA_NULL_STR_L) == pVariant->nLen) { + setNull(payload, type, tDataTypeDesc[type].nSize); + } else { + *((double *)payload) = strtod(pVariant->pz, &endPtr); + if (errno == ERANGE && *(double *)payload == -1) { + return -1; + } + + if (isnan(*(double *)payload) || isinf(*(double *)payload)) { + return -1; + } + } + } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { + *((double *)payload) = pVariant->i64Key; + } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { + *((double *)payload) = pVariant->dKey; + if (isnan(pVariant->dKey) || isinf(pVariant->dKey)) { + return -1; + } + } + + break; + } + + case TSDB_DATA_TYPE_BINARY: { + if (pVariant->nType != TSDB_DATA_TYPE_BINARY) { + toBinary(pVariant, &payload, &pVariant->nLen); + } else { + strncpy(payload, pVariant->pz, pVariant->nLen); + } + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: { + *((int64_t *)payload) = pVariant->i64Key; + break; + } + case TSDB_DATA_TYPE_NCHAR: { + if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) { + toNchar(pVariant, &payload, &pVariant->nLen); + } else { + wcsncpy((wchar_t *)payload, pVariant->wpz, pVariant->nLen); + } + } + } + + return 0; +} + +/* + * In variant, bool/smallint/tinyint/int/bigint share the same attribution of + * structure, also ignore the convert the type required + * + * It is actually the bigint/binary/bool/nchar type transfer + */ +int32_t tVariantTypeSetType(tVariant *pVariant, char type) { + if (pVariant == NULL || pVariant->nType == 0) { // value is not set + return 0; + } + + switch (type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (convertToBool(pVariant, &pVariant->i64Key) < 0) { + return -1; + } + + pVariant->nType = type; + break; + } + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_SMALLINT: { + doConvertToInteger(pVariant, (char *)&pVariant->i64Key, type, true); + pVariant->nType = TSDB_DATA_TYPE_BIGINT; + break; + } + case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_DOUBLE: { + if (pVariant->nType == TSDB_DATA_TYPE_BINARY) { + errno = 0; + double v = strtod(pVariant->pz, NULL); + if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) { + free(pVariant->pz); + return -1; + } + + free(pVariant->pz); + pVariant->dKey = v; + } else if (pVariant->nType == TSDB_DATA_TYPE_NCHAR) { + errno = 0; + double v = wcstod(pVariant->wpz, NULL); + if ((errno == ERANGE && v == -1) || (isinf(v) || isnan(v))) { + free(pVariant->pz); + return -1; + } + + free(pVariant->pz); + pVariant->dKey = v; + } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { + pVariant->dKey = pVariant->i64Key; + } + + pVariant->nType = TSDB_DATA_TYPE_DOUBLE; + break; + } + case 
TSDB_DATA_TYPE_BINARY: { + if (pVariant->nType != TSDB_DATA_TYPE_BINARY) { + toBinary(pVariant, &pVariant->pz, &pVariant->nLen); + } + pVariant->nType = type; + break; + } + case TSDB_DATA_TYPE_NCHAR: { + if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) { + toNchar(pVariant, &pVariant->pz, &pVariant->nLen); + } + pVariant->nType = type; + break; + } + } + + return 0; +} + +bool isNull(const char *val, int32_t type) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + return *(uint8_t *)val == TSDB_DATA_BOOL_NULL; + case TSDB_DATA_TYPE_TINYINT: + return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL; + case TSDB_DATA_TYPE_SMALLINT: + return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL; + case TSDB_DATA_TYPE_INT: + return *(uint32_t *)val == TSDB_DATA_INT_NULL; + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL; + case TSDB_DATA_TYPE_FLOAT: + return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL; + case TSDB_DATA_TYPE_DOUBLE: + return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL; + case TSDB_DATA_TYPE_NCHAR: + return *(uint32_t *)val == TSDB_DATA_NCHAR_NULL; + case TSDB_DATA_TYPE_BINARY: + return *(uint8_t *)val == TSDB_DATA_BINARY_NULL; + default: + return false; + }; +} + +void setNull(char *val, int32_t type, int32_t bytes) { setNullN(val, type, bytes, 1); } + +void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BOOL_NULL; + } + break; + case TSDB_DATA_TYPE_TINYINT: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint8_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_TINYINT_NULL; + } + break; + case TSDB_DATA_TYPE_SMALLINT: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint16_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_SMALLINT_NULL; + } + break; + case TSDB_DATA_TYPE_INT: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_INT_NULL; + } + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_BIGINT_NULL; + } + break; + case TSDB_DATA_TYPE_FLOAT: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint32_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_FLOAT_NULL; + } + break; + case TSDB_DATA_TYPE_DOUBLE: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL; + } + break; + case TSDB_DATA_TYPE_NCHAR: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint32_t *)(val + i * bytes) = TSDB_DATA_NCHAR_NULL; + } + break; + case TSDB_DATA_TYPE_BINARY: + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint8_t *)(val + i * bytes) = TSDB_DATA_BINARY_NULL; + } + break; + default: { + for (int32_t i = 0; i < numOfElems; ++i) { + *(uint32_t *)(val + i * tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize) = TSDB_DATA_INT_NULL; + } + break; + } + } +} + +void assignVal(char *val, char *src, int32_t len, int32_t type) { + switch (type) { + case TSDB_DATA_TYPE_INT: { + *((int32_t *)val) = *(int32_t *)src; + break; + } + case TSDB_DATA_TYPE_FLOAT: { + *((float *)val) = *(float *)src; + break; + }; + case TSDB_DATA_TYPE_DOUBLE: { + *((double *)val) = *(double *)src; + break; + }; + case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: { + *((int64_t *)val) = *(int64_t *)src; + break; + }; + case TSDB_DATA_TYPE_SMALLINT: { + 
*((int16_t *)val) = *(int16_t *)src; + break; + }; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + *((int8_t *)val) = *(int8_t *)src; + break; + }; + default: { + memcpy(val, src, len); + break; + } + } +} + +void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size) { + char tmpBuf[4096] = {0}; + + switch (type) { + case TSDB_DATA_TYPE_INT: { + SWAP(*(int32_t *)(pLeft), *(int32_t *)(pRight)); + break; + } + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: { + SWAP(*(int64_t *)(pLeft), *(int64_t *)(pRight)); + break; + } + case TSDB_DATA_TYPE_DOUBLE: { + SWAP(*(double *)(pLeft), *(double *)(pRight)); + break; + } + case TSDB_DATA_TYPE_SMALLINT: { + SWAP(*(int16_t *)(pLeft), *(int16_t *)(pRight)); + break; + } + + case TSDB_DATA_TYPE_FLOAT: { + SWAP(*(float *)(pLeft), *(float *)(pRight)); + break; + } + + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: { + SWAP(*(int8_t *)(pLeft), *(int8_t *)(pRight)); + break; + } + + default: { + assert(size <= 4096); + memcpy(tmpBuf, pLeft, size); + memcpy(pLeft, pRight, size); + memcpy(pRight, tmpBuf, size); + break; + } + } +} diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c new file mode 100644 index 000000000000..8a3f96ab7319 --- /dev/null +++ b/src/util/src/tutil.c @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
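A minimal sketch of the intended flow through the tVariant helpers above, converting the text of a numeric token into the binary form an INT column expects; the literal "42" is illustrative:

tVariant v = {0};
char     txt[] = "42";
char     payload[sizeof(int32_t)] = {0};

tVariantCreateN(&v, txt, 2, TSDB_DATA_TYPE_INT);   /* parses the text into v.i64Key */
if (tVariantDump(&v, payload, TSDB_DATA_TYPE_INT) == 0) {
  /* payload now holds the 32-bit value for the column */
}
tVariantDestroy(&v);   /* only frees heap data for BINARY/NCHAR variants */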
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef USE_LIBICONV +#include "iconv.h" +#endif + +#include "tcrc32c.h" +#include "tglobalcfg.h" +#include "ttime.h" +#include "ttypes.h" +#include "tutil.h" + +int32_t strdequote(char *z) { + if (z == NULL) { + return 0; + } + + int32_t quote = z[0]; + if (quote != '\'' && quote != '"') { + return (int32_t)strlen(z); + } + + int32_t i = 1, j = 0; + + while (z[i] != 0) { + if (z[i] == quote) { + if (z[i + 1] == quote) { + z[j++] = (char)quote; + i++; + } else { + z[j++] = 0; + return (j - 1); + } + } else { + z[j++] = z[i]; + } + + i++; + } + + return j + 1; // only one quote, do nothing +} + +void strtrim(char *z) { + int32_t i = 0; + int32_t j = 0; + + int32_t delta = 0; + while (z[j] == ' ') { + ++j; + } + + if (z[j] == 0) { + z[0] = 0; + return; + } + + delta = j; + + int32_t stop = 0; + while (z[j] != 0) { + if (z[j] == ' ' && stop == 0) { + stop = j; + } else if (z[j] != ' ' && stop != 0) { + stop = 0; + } + + z[i++] = z[j++]; + } + + if (stop > 0) { + z[stop - delta] = 0; + } else if (j != i) { + z[i] = 0; + } +} + +char **strsplit(char *z, const char *delim, int32_t *num) { + *num = 0; + int32_t size = 4; + + char **split = malloc(POINTER_BYTES * size); + + for (char *p = strsep(&z, delim); p != NULL; p = strsep(&z, delim)) { + size_t len = strlen(p); + if (len == 0) { + continue; + } + + split[(*num)++] = p; + if ((*num) >= size) { + size = (size << 1); + split = realloc(split, POINTER_BYTES * size); + } + } + + return split; +} + +char *strnchr(char *haystack, char needle, int32_t len) { + for (int32_t i = 0; i < len; ++i) { + if (haystack[i] == needle) { + return &haystack[i]; + } + } + + return NULL; +} + +void strtolower(char *z, char *dst) { + int quote = 0; + char *str = z; + if (dst == NULL) { + return; + } + + while (*str) { + if (*str == '\'' || *str == '"') { + quote = quote ^ 1; + } + + if ((!quote) && (*str >= 'A' && *str <= 'Z')) { + *dst++ = *str | 0x20; + } else { + *dst++ = *str; + } + + str++; + } +} + +char *paGetToken(char *string, char **token, int32_t *tokenLen) { + char quote = 0; + + while (*string != 0) { + if (*string == ' ' || *string == '\t') { + ++string; + } else { + break; + } + } + + if (*string == '@') { + quote = 1; + string++; + } + + *token = string; + + while (*string != 0) { + if (*string == '@' && quote) { + //*string = 0; + ++string; + break; + } + + if (*string == '#' || *string == '\n' || *string == '\r') { + *string = 0; + break; + } + + if ((*string == ' ' || *string == '\t') && !quote) { + break; + } else { + ++string; + } + } + + *tokenLen = (int32_t)(string - *token); + if (quote) { + *tokenLen = *tokenLen - 1; + } + + return string; +} + +int64_t strnatoi(char *num, int32_t len) { + int64_t ret = 0, i, dig, base = 1; + + if (len > (int32_t)strlen(num)) { + len = (int32_t)strlen(num); + } + + if ((len > 2) && (num[0] == '0') && ((num[1] == 'x') || (num[1] == 'X'))) { + for (i = len - 1; i >= 2; --i, base *= 16) { + if (num[i] >= '0' && num[i] <= '9') { + dig = (num[i] - '0'); + } else if (num[i] >= 'a' && num[i] <= 'f') { + dig = num[i] - 'a' + 10; + } else if (num[i] >= 'A' && num[i] <= 'F') { + dig = num[i] - 'A' + 10; + } else { + return 0; + } + ret = dig * base; + } + } else { + for (i = len - 1; i >= 0; --i, base *= 10) { + if (num[i] >= '0' && num[i] <= '9') { + dig = (num[i] - '0'); + } else { + return 0; + } + ret += dig * base; + } + } + + return ret; +} + +FORCE_INLINE size_t getLen(size_t old, size_t size) { + if (old == 1) 
{ + old = 2; + } + + while (old < size) { + old = (old * 1.5); + } + + return old; +} + +static char *ensureSpace(char *dest, size_t *curSize, size_t size) { + if (*curSize < size) { + *curSize = getLen(*curSize, size); + + char *tmp = realloc(dest, *curSize); + if (tmp == NULL) { + free(dest); + return NULL; + } + + return tmp; + } + + return dest; +} + +char *strreplace(const char *str, const char *pattern, const char *rep) { + if (str == NULL || pattern == NULL || rep == NULL) { + return NULL; + } + + const char *s = str; + + size_t oldLen = strlen(str); + size_t newLen = oldLen; + + size_t repLen = strlen(rep); + size_t patternLen = strlen(pattern); + + char *dest = calloc(1, oldLen + 1); + if (dest == NULL) { + return NULL; + } + + if (patternLen == 0) { + return strcpy(dest, str); + } + + int32_t start = 0; + + while (1) { + char *p = strstr(str, pattern); + if (p == NULL) { // remain does not contain pattern + size_t remain = (oldLen - (str - s)); + size_t size = remain + start + 1; + + dest = ensureSpace(dest, &newLen, size); + if (dest == NULL) { + return NULL; + } + + strcpy(dest + start, str); + dest[start + remain] = 0; + break; + } + + size_t len = p - str; + size_t size = start + len + repLen + 1; + + dest = ensureSpace(dest, &newLen, size); + if (dest == NULL) { + return NULL; + } + + memcpy(dest + start, str, len); + + str += (len + patternLen); + start += len; + + memcpy(dest + start, rep, repLen); + start += repLen; + } + + return dest; +} + +int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]) { + int32_t i; + char hexval[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + for (i = 0; i < len; i++) { + hexstr[i * 2] = hexval[((bytes[i] >> 4) & 0xF)]; + hexstr[(i * 2) + 1] = hexval[(bytes[i]) & 0x0F]; + } + + return 0; +} + +int32_t taosHexStrToByteArray(char hexstr[], char bytes[]) { + int32_t len, i; + char ch; + // char *by; + + len = (int32_t)strlen((char *)hexstr) / 2; + + for (i = 0; i < len; i++) { + ch = hexstr[i * 2]; + if (ch >= '0' && ch <= '9') + bytes[i] = (char)(ch - '0'); + else if (ch >= 'A' && ch <= 'F') + bytes[i] = (char)(ch - 'A' + 10); + else if (ch >= 'a' && ch <= 'f') + bytes[i] = (char)(ch - 'a' + 10); + else + return -1; + + ch = hexstr[i * 2 + 1]; + if (ch >= '0' && ch <= '9') + bytes[i] = (char)((bytes[i] << 4) + (ch - '0')); + else if (ch >= 'A' && ch <= 'F') + bytes[i] = (char)((bytes[i] << 4) + (ch - 'A' + 10)); + else if (ch >= 'a' && ch <= 'f') + bytes[i] = (char)((bytes[i] << 4) + (ch - 'a' + 10)); + else + return -1; + } + + return 0; +} + +// rename file name +int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstPath) { + int32_t ts = taosGetTimestampSec(); + + char fname[PATH_MAX] = {0}; // max file name length must be less than 255 + + char *delimiterPos = strrchr(fullPath, delimiter); + if (delimiterPos == NULL) return -1; + + int32_t fileNameLen = 0; + if (suffix) + fileNameLen = snprintf(fname, PATH_MAX, "%s.%d.%s", delimiterPos + 1, ts, suffix); + else + fileNameLen = snprintf(fname, PATH_MAX, "%s.%d", delimiterPos + 1, ts); + + size_t len = (size_t)((delimiterPos - fullPath) + fileNameLen + 1); + if (*dstPath == NULL) { + *dstPath = calloc(1, len + 1); + if (*dstPath == NULL) return -1; + } + + strncpy(*dstPath, fullPath, (size_t)(delimiterPos - fullPath + 1)); + strncat(*dstPath, fname, (size_t)fileNameLen); + (*dstPath)[len] = 0; + + return rename(fullPath, *dstPath); +} + +bool taosCheckDbName(char *db, char *monitordb) { + char *pos = 
strchr(db, '.'); + if (pos == NULL) return false; + + return strcmp(pos + 1, monitordb) == 0; +} + +bool taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs) { +#ifdef USE_LIBICONV + iconv_t cd = iconv_open(tsCharset, DEFAULT_UNICODE_ENCODEC); + size_t ucs4_input_len = ucs4_max_len; + size_t outLen = ucs4_max_len; + if (iconv(cd, (char **)&ucs4, &ucs4_input_len, &mbs, &outLen) == -1) { + iconv_close(cd); + return false; + } + iconv_close(cd); + return true; +#else + mbstate_t state = {0}; + int32_t len = (int32_t) wcsnrtombs(NULL, (const wchar_t **) &ucs4, ucs4_max_len / 4, 0, &state); + if (len < 0) { + return false; + } + memset(&state, 0, sizeof(state)); + len = wcsnrtombs(mbs, (const wchar_t **) &ucs4, ucs4_max_len / 4, (size_t) len, &state); + if (len < 0) { + return false; + } + return true; +#endif +} + +bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len) { + memset(ucs4, 0, ucs4_max_len); +#ifdef USE_LIBICONV + iconv_t cd = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + size_t ucs4_input_len = mbs_len; + size_t outLen = ucs4_max_len; + if (iconv(cd, &mbs, &ucs4_input_len, &ucs4, &outLen) == -1) { + iconv_close(cd); + return false; + } + iconv_close(cd); + return true; +#else + mbstate_t state = {0}; + int32_t len = mbsnrtowcs((wchar_t *) ucs4, (const char **) &mbs, mbs_len, ucs4_max_len / 4, &state); + return len >= 0; +#endif +} + +bool taosValidateEncodec(char *encodec) { +#ifdef USE_LIBICONV + iconv_t cd = iconv_open(encodec, DEFAULT_UNICODE_ENCODEC); + if (cd == (iconv_t)(-1)) { + return false; + } + iconv_close(cd); + return true; +#else + return true; +#endif +} \ No newline at end of file diff --git a/src/util/src/version.c b/src/util/src/version.c new file mode 100755 index 000000000000..aad6dc0322b2 --- /dev/null +++ b/src/util/src/version.c @@ -0,0 +1,4 @@ +char version[64] = "1.6.0.0"; +char compatible_version[64] = "1.6.0.0"; +char gitinfo[128] = "82cbce3261d06ab37c3bd4786c7b2e3d2316c42a"; +char buildinfo[512] = "Built by ubuntu at 2019-07-05 18:42"; diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml new file mode 100644 index 000000000000..c7e6acec7589 --- /dev/null +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -0,0 +1,33 @@ + + + 4.0.0 + + com.taosdata.jdbc + jdbcdemo + 1.0-SNAPSHOT + jar + + + + org.apache.maven.plugins + maven-compiler-plugin + + 8 + 8 + + + + + + + + com.taosdata.jdbc + taos-jdbcdriver + 1.0.0 + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/TSDBSyncSample.java b/tests/examples/JDBC/JDBCDemo/src/main/java/TSDBSyncSample.java new file mode 100644 index 000000000000..c093b604da6d --- /dev/null +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/TSDBSyncSample.java @@ -0,0 +1,205 @@ +import java.sql.*; + +public class TSDBSyncSample { + private static final String JDBC_PROTOCAL = "jdbc:TAOS://"; + private static final String TSDB_DRIVER = "com.taosdata.jdbc.TSDBDriver"; + + private String host = "127.0.0.1"; + private String user = "root"; + private String password = "taosdata"; + private int port = 0; + private String jdbcUrl = ""; + + private String databaseName = "db"; + private String metricsName = "mt"; + private String tablePrefix = "t"; + + private int tablesCount = 1; + private int loopCount = 2; + private int batchSize = 10; + private long beginTimestamp = 1519833600000L; + + private long rowsInserted = 0; + + static { + try { + Class.forName(TSDB_DRIVER); + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * 
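* Entry point of the sync demo: reads the optional arguments (host, table count, loop count, batch size), builds the JDBC URL, creates the demo database and tables, then runs the insert and query passes.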
@param args + */ + public static void main(String[] args) { + TSDBSyncSample tester = new TSDBSyncSample(); + tester.doReadArgument(args); + + System.out.println("---------------------------------------------------------------"); + System.out.println("Start testing..."); + System.out.println("---------------------------------------------------------------"); + + tester.doMakeJdbcUrl(); + tester.doCreateDbAndTable(); + tester.doExecuteInsert(); + tester.doExecuteQuery(); + + System.out.println("\n---------------------------------------------------------------"); + System.out.println("Stop testing..."); + System.out.println("---------------------------------------------------------------"); + } + + private void doReadArgument(String[] args) { + System.out.println("Arguments format: host tables loop batchs"); + if (args.length >= 1) { + this.host = args[0]; + } + + if (args.length >= 2) { + this.tablesCount = Integer.parseInt(args[1]); + } + + if (args.length >= 3) { + this.loopCount = Integer.parseInt(args[2]); + } + + if (args.length >= 4) { + this.batchSize = Integer.parseInt(args[3]); + } + } + + private void doMakeJdbcUrl() { + // jdbc:TSDB://127.0.0.1:0/dbname?user=root&password=taosdata + System.out.println("\nJDBC URL to use:"); + this.jdbcUrl = String.format("%s%s:%d/%s?user=%s&password=%s", JDBC_PROTOCAL, this.host, this.port, "", + this.user, this.password); + System.out.println(this.jdbcUrl); + } + + private void doCreateDbAndTable() { + System.out.println("\n---------------------------------------------------------------"); + System.out.println("Start creating databases and tables..."); + String sql = ""; + try (Connection conn = DriverManager.getConnection(jdbcUrl); + Statement stmt = conn.createStatement()){ + + sql = "create database if not exists " + this.databaseName; + stmt.executeUpdate(sql); + System.out.printf("Successfully executed: %s\n", sql); + + sql = "use " + this.databaseName; + stmt.executeUpdate(sql); + System.out.printf("Successfully executed: %s\n", sql); + + sql = "create table if not exists " + this.metricsName + " (ts timestamp, v1 int) tags(t1 int)"; + stmt.executeUpdate(sql); + System.out.printf("Successfully executed: %s\n", sql); + + for (int i = 0; i < this.tablesCount; i++) { + sql = String.format("create table if not exists %s%d using %s tags(%d)", this.tablePrefix, i, + this.metricsName, i); + stmt.executeUpdate(sql); + System.out.printf("Successfully executed: %s\n", sql); + } + } catch (SQLException e) { + e.printStackTrace(); + System.out.printf("Failed to execute SQL: %s\n", sql); + System.exit(4); + } catch (Exception e) { + e.printStackTrace(); + System.exit(4); + } + System.out.println("Successfully created databases and tables"); + } + + public void doExecuteInsert() { + System.out.println("\n---------------------------------------------------------------"); + System.out.println("Start inserting data..."); + int start = (int) System.currentTimeMillis(); + StringBuilder sql = new StringBuilder(""); + try (Connection conn = DriverManager.getConnection(jdbcUrl); + Statement stmt = conn.createStatement()){ + stmt.executeUpdate("use " + databaseName); + for (int loop = 0; loop < this.loopCount; loop++) { + for (int table = 0; table < this.tablesCount; ++table) { + sql = new StringBuilder("insert into "); + sql.append(this.tablePrefix).append(table).append(" values"); + for (int batch = 0; batch < this.batchSize; ++batch) { + int rows = loop * this.batchSize + batch; + sql.append("(").append(this.beginTimestamp + 
rows).append(",").append(rows).append(")"); + } + int affectRows = stmt.executeUpdate(sql.toString()); + this.rowsInserted += affectRows; + } + } + } catch (SQLException e) { + e.printStackTrace(); + System.out.printf("Failed to execute SQL: %s\n", sql.toString()); + System.exit(4); + } catch (Exception e) { + e.printStackTrace(); + System.exit(4); + } + int end = (int) System.currentTimeMillis(); + System.out.println("Inserting completed!"); + System.out.printf("Total %d rows inserted, %d rows failed, time spend %d seconds.\n", this.rowsInserted, + this.loopCount * this.batchSize - this.rowsInserted, (end - start) / 1000); + } + + public void doExecuteQuery() { + System.out.println("\n---------------------------------------------------------------"); + System.out.println("Starting querying data..."); + ResultSet resSet = null; + StringBuilder sql = new StringBuilder(""); + StringBuilder resRow = new StringBuilder(""); + try (Connection conn = DriverManager.getConnection(jdbcUrl); + Statement stmt = conn.createStatement()){ + stmt.executeUpdate("use " + databaseName); + for (int i = 0; i < this.tablesCount; ++i) { + sql = new StringBuilder("select * from ").append(this.tablePrefix).append(i); + + resSet = stmt.executeQuery(sql.toString()); + if (resSet == null) { + System.out.println(sql + " failed"); + System.exit(4); + } + + ResultSetMetaData metaData = resSet.getMetaData(); + System.out.println("Retrieve metadata of " + tablePrefix + i); + for (int column = 1; column <= metaData.getColumnCount(); ++column) { + System.out.printf("Column%d: name = %s, type = %d, type name = %s, display size = %d\n", column, metaData.getColumnName(column), metaData.getColumnType(column), + metaData.getColumnTypeName(column), metaData.getColumnDisplaySize(column)); + } + int rows = 0; + System.out.println("Retrieve data of " + tablePrefix + i); + while (resSet.next()) { + resRow = new StringBuilder(); + for (int col = 1; col <= metaData.getColumnCount(); col++) { + resRow.append(metaData.getColumnName(col)).append("=").append(resSet.getObject(col)) + .append(" "); + } + System.out.println(resRow.toString()); + rows++; + } + + try { + if (resSet != null) + resSet.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + System.out.printf("Successfully executed query: %s;\nTotal rows returned: %d\n", sql.toString(), rows); + } + } catch (SQLException e) { + e.printStackTrace(); + System.out.printf("Failed to execute query: %s\n", sql.toString()); + System.exit(4); + } catch (Exception e) { + e.printStackTrace(); + System.exit(4); + } + System.out.println("Query completed!"); + } + +} diff --git a/tests/examples/R/command.txt b/tests/examples/R/command.txt new file mode 100644 index 000000000000..238e9ae88083 --- /dev/null +++ b/tests/examples/R/command.txt @@ -0,0 +1,55 @@ +# Linux Platform +install.packages('rJDBC', repos='http://cran.us.r-project.org') + +# Loading RJDBC packages +library('RJDBC') +# Set up working path and JDBC driver storage location +setwd('C:/TDengine') + +# Load JDBC Driver for TDengine +drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-1.0.0-dist.jar", identifier.quote="\"") + +# Connect to the database +conn<-dbConnect(drv,"jdbc:TSDB://192.168.1.114:0/?user=root&password=taosdata","root","taosdata") + +# Get connection information +dbGetInfo(conn) + +# Using database test +dbSendUpdate(conn, "use test") + +# Insert data +dbSendUpdate(conn, "insert into t1 values(now, 99)") + +# View all tables +table1<-dbGetQuery(conn,"show tables") + +# Functional support for RJDBC + +# 
List all tables +dbListTables(conn) + +# Is there table iris +dbExistsTable(conn,”iris”) + +# Connect summary information +summary(conn) +dbGetInfo(conn) + +# Read all the data from the T1 table +dbReadTable(conn, "t1") + +# Delete table t1 +dbRemoveTable(conn,"t1") + +# Execute any non-query SQL statements +dbSendUpdate(conn, "create table t1(a timestamp, b int, c nchar(12))"); + +# Write data +dbWriteTable(conn, "t1", t_demo_n, overwrite=FALSE, append=TRUE) + +# Extracting data on demand using SQL statements +dbGetQuery(conn, "select k from tu") + +# Close the connection +dbDisconnect(conn) diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c new file mode 100644 index 000000000000..113f65a44150 --- /dev/null +++ b/tests/examples/c/asyncdemo.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +// TAOS asynchronous API example +// this example opens multiple tables, insert/retrieve multiple tables +// it is used by TAOS internally for one performance testing +// to compiple: gcc -o asyncdemo asyncdemo.c -ltaos + +#include +#include +#include +#include +#include + +#include + +int points = 5; +int numOfTables = 3; +int tablesProcessed = 0; +int64_t st, et; + +typedef struct { + int id; + TAOS *taos; + char name[16]; + time_t timeStamp; + int value; + int rowsInserted; + int rowsTried; + int rowsRetrieved; +} STable; + +void taos_insert_call_back(void *param, TAOS_RES *tres, int code); +void taos_select_call_back(void *param, TAOS_RES *tres, int code); +void taos_error(TAOS *taos); + +int main(int argc, char *argv[]) +{ + TAOS *taos; + struct timeval systemTime; + int i; + char sql[1024] = { 0 }; + char prefix[20] = { 0 }; + char db[128] = { 0 }; + STable *tableList; + + if (argc != 5) { + printf("usage: %s server-ip dbname rowsPerTable numOfTables\n", argv[0]); + exit(0); + } + + // a simple way to parse input parameters + if (argc >= 3) strcpy(db, argv[2]); + if (argc >= 4) points = atoi(argv[3]); + if (argc >= 5) numOfTables = atoi(argv[4]); + + size_t size = sizeof(STable) * (size_t)numOfTables; + tableList = (STable *)malloc(size); + memset(tableList, 0, size); + + taos_init(); + + taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); + if (taos == NULL) + taos_error(taos); + + printf("success to connect to server\n"); + + sprintf(sql, "drop database %s", db); + taos_query(taos, sql); + + sprintf(sql, "create database %s", db); + if (taos_query(taos, sql) != 0) + taos_error(taos); + + sprintf(sql, "use %s", db); + if (taos_query(taos, sql) != 0) + taos_error(taos); + + strcpy(prefix, "asytbl_"); + for (i = 0; i < numOfTables; ++i) { + tableList[i].id = i; + tableList[i].taos = taos; + sprintf(tableList[i].name, "%s%d", prefix, i); + sprintf(sql, "create table %s%d (ts timestamp, volume bigint)", prefix, i); + if (taos_query(taos, sql) != 0) + taos_error(taos); + } + + gettimeofday(&systemTime, NULL); + for (i = 0; i < numOfTables; ++i) + tableList[i].timeStamp = (time_t)(systemTime.tv_sec) * 1000 + 
systemTime.tv_usec / 1000; + + printf("success to create tables, press any key to insert\n"); + getchar(); + + printf("start to insert...\n"); + gettimeofday(&systemTime, NULL); + //st = systemTime.tv_sec * 1000000 + systemTime.tv_usec; + + for (i = 0; irowsTried++; + + if (code < 0) { + printf("%s insert failed, code:%d, rows:%d\n", pTable->name, code, pTable->rowsTried); + } + else if (code == 0) { + printf("%s not inserted\n", pTable->name); + } + else { + pTable->rowsInserted++; + } + + if (pTable->rowsTried < points) { + // for this demo, insert another record + sprintf(sql, "insert into %s values(now+%da, %d)", pTable->name, pTable->rowsTried, pTable->rowsTried); + taos_query_a(pTable->taos, sql, taos_insert_call_back, (void *)pTable); + } + else { + printf("%d rows data are inserted into %s\n", points, pTable->name); + tablesProcessed++; + if (tablesProcessed >= numOfTables) { + gettimeofday(&systemTime, NULL); + et = systemTime.tv_sec * 1000000 + systemTime.tv_usec; + printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables); + } + } +} + +void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) +{ + STable *pTable = (STable *)param; + struct timeval systemTime; + + if (numOfRows > 0) { + + for (int i = 0; irowsRetrieved += numOfRows; + + // retrieve next batch of rows + taos_fetch_rows_a(tres, taos_retrieve_call_back, pTable); + + } + else { + if (numOfRows < 0) + printf("%s retrieve failed, code:%d\n", pTable->name, numOfRows); + + taos_free_result(tres); + printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name); + + tablesProcessed++; + if (tablesProcessed >= numOfTables) { + gettimeofday(&systemTime, NULL); + et = systemTime.tv_sec * 1000000 + systemTime.tv_usec; + printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables); + } + } +} + +void taos_select_call_back(void *param, TAOS_RES *tres, int code) +{ + STable *pTable = (STable *)param; + + if (code == 0 && tres) { + // asynchronous API to fetch a batch of records + taos_fetch_rows_a(tres, taos_retrieve_call_back, pTable); + + // taos_fetch_row_a is a less efficient way to retrieve records since it call back app for every row + // taos_fetch_row_a(tres, taos_fetch_row_call_back, pTable); + } + else { + printf("%s select failed, code:%d\n", pTable->name, code); + exit(1); + } +} diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c new file mode 100644 index 000000000000..fec0602aec26 --- /dev/null +++ b/tests/examples/c/demo.c @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +// TAOS standard API example. 
The same syntax as MySQL, but only a subset +// to compile: gcc -o demo demo.c -ltaos + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <taos.h> // TAOS header file + +void taosMsleep(int mseconds); + +int main(int argc, char *argv[]) { + TAOS * taos; + char qstr[1024]; + TAOS_RES *result; + + // connect to server + if (argc < 2) { + printf("please input server-ip \n"); + return 0; + } + + // init TAOS + taos_init(); + + taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connect to server, reason:%s\n", taos_errstr(taos)); + exit(1); + } + printf("success to connect to server\n"); + + + taos_query(taos, "drop database demo"); + if (taos_query(taos, "create database demo") != 0) { + printf("failed to create database, reason:%s\n", taos_errstr(taos)); + exit(1); + } + printf("success to create database\n"); + + taos_query(taos, "use demo"); + + // create table + if (taos_query(taos, "create table m1 (ts timestamp, speed int)") != 0) { + printf("failed to create table, reason:%s\n", taos_errstr(taos)); + exit(1); + } + printf("success to create table\n"); + + // sleep for one second to make sure table is created on data node + // taosMsleep(1000); + + // insert 10 records + int i = 0; + for (i = 0; i < 10; ++i) { + sprintf(qstr, "insert into m1 values (now+%ds, %d)", i, i * 10); + if (taos_query(taos, qstr)) { + printf("failed to insert row: %i, reason:%s\n", i, taos_errstr(taos)); + } + //sleep(1); + } + printf("success to insert rows, total %d rows\n", i); + + // query the records + sprintf(qstr, "SELECT * FROM m1"); + if (taos_query(taos, qstr) != 0) { + printf("failed to select, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + result = taos_use_result(taos); + + if (result == NULL) { + printf("failed to get result, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + TAOS_ROW row; + int rows = 0; + int num_fields = taos_field_count(taos); + TAOS_FIELD *fields = taos_fetch_fields(result); + char temp[256]; + + printf("select * from table, result:\n"); + // fetch the records row by row + while ((row = taos_fetch_row(result))) { + rows++; + taos_print_row(temp, row, fields, num_fields); + printf("%s\n", temp); + } + + taos_free_result(result); + printf("====demo end====\n\n"); + return getchar(); +} diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile new file mode 100644 index 000000000000..5d4b95e926c8 --- /dev/null +++ b/tests/examples/c/makefile @@ -0,0 +1,23 @@ +# Copyright (c) 2017 by TAOS Technologies, Inc. +# todo: library dependency, header file dependency + +ROOT=./ +TARGET=exe +LFLAGS = '-Wl,-rpath,/usr/local/lib/taos/' -ltaos -lpthread -lm -lrt +CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 -std=gnu99 + +all: $(TARGET) + +exe: + gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)/asyncdemo $(LFLAGS) + gcc $(CFLAGS) ./demo.c -o $(ROOT)/demo $(LFLAGS) + gcc $(CFLAGS) ./stream.c -o $(ROOT)/stream $(LFLAGS) + gcc $(CFLAGS) ./subscribe.c -o $(ROOT)/subscribe $(LFLAGS) + +clean: + rm $(ROOT)asyncdemo + rm $(ROOT)demo + rm $(ROOT)stream + rm $(ROOT)subscribe + + \ No newline at end of file diff --git a/tests/examples/c/stream.c b/tests/examples/c/stream.c new file mode 100755 index 000000000000..429d303a92eb --- /dev/null +++ b/tests/examples/c/stream.c @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc.
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <pthread.h> +#include <taos.h> // include TDengine header file + +typedef struct { + char server_ip[64]; + char db_name[64]; + char tbl_name[64]; +} param; + +int g_thread_exit_flag = 0; +int insert_rows(void *sarg); + +void streamCallBack(void *param, TAOS_RES *res, TAOS_ROW row) +{ + // in this simple demo, it just prints out the result + char temp[128]; + + TAOS_FIELD *fields = taos_fetch_fields(res); + int numFields = taos_num_fields(res); + + taos_print_row(temp, row, fields, numFields); + + printf("\n%s\n", temp); +} + +int main(int argc, char *argv[]) +{ + TAOS *taos; + char db_name[64]; + char tbl_name[64]; + char sql[1024] = { 0 }; + char command[1024] = { 0 }; + + if (argc != 4) { + printf("usage: %s server-ip dbname tblname\n", argv[0]); + exit(0); + } + + // init TAOS + taos_init(); + + strcpy(db_name, argv[2]); + strcpy(tbl_name, argv[3]); + + // create a pthread that inserts one row per second for the stream computation + param *t_param = (param *)malloc(sizeof(param)); + if (NULL == t_param) + { + printf("failed to malloc\n"); + exit(1); + } + memset(t_param, 0, sizeof(param)); + strcpy(t_param->server_ip, argv[1]); + strcpy(t_param->db_name, db_name); + strcpy(t_param->tbl_name, tbl_name); + + pthread_t pid; + pthread_create(&pid, NULL, insert_rows, t_param); + + sleep(3); // wait for the database to be created
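+  /*
+   * Note: the fixed sleep(3) above is only a best-effort wait for the writer
+   * thread to create the database. A sketch of a more robust alternative,
+   * assuming taos_connect() keeps returning NULL while the database does not
+   * exist yet (the retry count of 10 is arbitrary):
+   *
+   *   int retry = 0;
+   *   while ((taos = taos_connect(argv[1], "root", "taosdata", db_name, 0)) == NULL
+   *          && retry++ < 10) {
+   *     sleep(1);  // database may not be ready yet; try again
+   *   }
+   */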
+ // open connection to database + taos = taos_connect(argv[1], "root", "taosdata", db_name, 0); + if (taos == NULL) { + printf("failed to connet to server:%s\n", argv[1]); + free(t_param); + exit(1); + } + + // starting stream calc, + printf("please input stream SQL:[e.g., select count(*) from tblname interval(10s);]\n"); + #if 0 + fgets(sql, sizeof(sql), stdin); + if (sql[0] == 0) { + printf("input NULL stream SQL, so exit!\n"); + free(t_param); + exit(1); + } + #endif + strcpy(sql, "select count(*) from tblname interval(3s);"); + + // param is set to NULL in this demo, it shall be set to the pointer to app context + TAOS_STREAM *pStream = taos_open_stream(taos, sql, streamCallBack, 0, NULL, NULL); + if (NULL == pStream) { + printf("failed to create stream: %s\n", taos_errstr(taos)); + free(t_param); + exit(1); + } + + printf("presss any key to exit\n"); + getchar(); + + taos_close_stream(pStream); + + g_thread_exit_flag = 1; + pthread_join(pid, NULL); + + taos_close(taos); + free(t_param); + + return 0; +} + + +int insert_rows(void *sarg) +{ + TAOS *taos; + char command[1024] = { 0 }; + param *winfo = (param * )sarg; + + if (NULL == winfo){ + printf("para is null!\n"); + exit(1); + } + + taos = taos_connect(winfo->server_ip, "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connet to server:%s\n", winfo->server_ip); + exit(1); + } + + // drop database + sprintf(command, "drop database %s;", winfo->db_name); + if (taos_query(taos, command) != 0) { + printf("failed to drop database, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + // create database + sprintf(command, "create database %s;", winfo->db_name); + if (taos_query(taos, command) != 0) { + printf("failed to create database, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + // use database + sprintf(command, "use %s;", winfo->db_name); + if (taos_query(taos, command) != 0) { + printf("failed to use database, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + // create table + sprintf(command, "create table %s (ts timestamp, speed int);", winfo->tbl_name); + if (taos_query(taos, command) != 0) { + printf("failed to create table, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + // insert data + int index = 0; + while (1) { + if (g_thread_exit_flag) break; + + index++; + sprintf(command, "insert into %s values (now, %d)", winfo->tbl_name, index); + if (taos_query(taos, command)) { + printf("failed to insert row [%s], reason:%s\n", command, taos_errstr(taos)); + } + sleep(1); + } + + taos_close(taos); + return 0; +} + + + +#if 0 +int bak_main(int argc, char *argv[]) +{ + TAOS *taos; + char db_name[64]; + char tbl_name[64]; + char sql[1024] = { 0 }; + char command[1024] = { 0 }; + + if (argc != 4) { + printf("usage: %s server-ip dbname tblname\n", argv[0]); + exit(0); + } + + // init TAOS + taos_init(); + + // open connection to database + taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("failed to connet to server:%s\n", argv[1]); + exit(1); + } + + strcpy(db_name, argv[2]); + strcpy(tbl_name, argv[3]); + + // drop database + sprintf(command, "drop database %s;", db_name); + if (taos_query(taos, command) != 0) { + printf("failed to drop database, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + sprintf(command, "create database %s;", db_name); + if (taos_query(taos, command) != 0) { + printf("failed to create database, reason:%s\n", taos_errstr(taos)); + exit(1); + } + else { + printf("create database[%s] success!\n", db_name); + } + + // create table + 
sprintf(command, "create table %s.%s (ts timestamp, speed int);", db_name, tbl_name); + if (taos_query(taos, command) != 0) { + printf("failed to create table, reason:%s\n", taos_errstr(taos)); + exit(1); + } + else { + printf("create table[%s] success!\n", tbl_name); + } + + // create pthread to insert into row per second for stream calc + param *t_param = (param *)malloc(sizeof(param)); + if (NULL == t_param) + { + printf("failed to malloc\n"); + exit(1); + } + memset(t_param, 0, sizeof(param)); + + strcpy(t_param->db_name, db_name); + strcpy(t_param->tbl_name, tbl_name); + t_param->taos = taos_connect(argv[1], "root", "taosdata", db_name, 0); + if (t_param->taos == NULL) { + printf("failed to connet to server:%s\n", argv[1]); + free(t_param); + exit(1); + } + + pthread_t pid; + pthread_create(&pid, NULL, insertRow, t_param); + + printf("start inserting records into the m1 table ......\n"); + sleep(5); + + // starting stream calc, + printf("please input stream SQL:[e.g., select count(*) from streamdb.m1 interval(10s) sliding(2s);]\n"); + fgets(sql, sizeof(sql), stdin); + if (sql[0] == 0) { + printf("input NULL stream SQL, so exit!\n"); + free(t_param); + exit(1); + } + + sprintf(command, "use %s", db_name); + if (taos_query(taos, command) != 0) { + printf("failed to use %s, reason:%s\n", db_name, taos_errstr(taos)); + exit(1); + } + // param is set to NULL in this demo, it shall be set to the pointer to app context + TAOS_STREAM *pStream = taos_open_stream(taos, sql, streamCallBack, 0, NULL, NULL); + if (NULL == pStream) { + printf("failed to create stream: %s\n", taos_errstr(taos)); + free(t_param); + exit(1); + } + else { + printf("success to create stream\n"); + } + + printf("presss any key to exit\n"); + getchar(); + + taos_close_stream(pStream); + + g_thread_exit_flag = 1; + pthread_join(pid, NULL); + + taos_close(taos); + taos_close(t_param->taos); + free(t_param); + + return 0; +} +#endif diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c new file mode 100644 index 000000000000..01f42c031936 --- /dev/null +++ b/tests/examples/c/subscribe.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +// sample code for TDengine subscribe/consume API +// to compile: gcc -o subscribe subscribe.c -ltaos + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <taos.h> // include TDengine header file + +int main(int argc, char *argv[]) +{ + TAOS_SUB *tsub; + TAOS_ROW row; + char dbname[64], table[64]; + char temp[256]; + + if ( argc == 1 ) { + printf("usage: %s server-ip db-name table-name \n", argv[0]); + exit(0); + } + + if ( argc >= 3 ) strcpy(dbname, argv[2]); + if ( argc >= 4 ) strcpy(table, argv[3]); + + tsub = taos_subscribe(argv[1], "root", "taosdata", dbname, table, 0, 1000); + if ( tsub == NULL ) { + printf("failed to connect to db:%s\n", dbname); + exit(1); + } + + TAOS_FIELD *fields = taos_fetch_subfields(tsub); + int fcount = taos_subfields_count(tsub); + + printf("start to retrieve data\n"); + printf("please use another taos client to insert rows into %s.%s\n", dbname, table); + while ( 1 ) { + row = taos_consume(tsub); + if ( row == NULL ) break; + + taos_print_row(temp, row, fields, fcount); + printf("%s\n", temp); + } + + taos_unsubscribe(tsub); + + return 0; +} + diff --git a/tests/examples/go/src/taosapp/taosapp.go b/tests/examples/go/src/taosapp/taosapp.go new file mode 100755 index 000000000000..75f780e61f8f --- /dev/null +++ b/tests/examples/go/src/taosapp/taosapp.go @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ +package main + +import ( + "database/sql" + "time" + "log" + "fmt" + _ "taosSql" +) + +func main() { + taosDriverName := "taosSql" + demodb := "demodb" + demot := "demot" + + fmt.Printf("\n======== start demo test ========\n") + // open connect to taos server + db, err := sql.Open(taosDriverName, "root:taosdata@/tcp(127.0.0.1)/demodb") + if err != nil { + log.Fatalf("Open database error: %s\n", err) + } + defer db.Close() + + drop_database(db, demodb) + create_database(db, demodb) + use_database(db, demodb) + create_table(db, demot) + insert_data(db, demot) + select_data(db, demot) + + fmt.Printf("\n======== start stmt mode test ========\n") + + demodbStmt := "demodbStmt" + demotStmt := "demotStmt" + drop_database_stmt(db, demodbStmt) + create_database_stmt(db, demodbStmt) + use_database_stmt(db, demodbStmt) + create_table_stmt(db, demotStmt) + insert_data_stmt(db, demotStmt) + select_data_stmt(db, demotStmt) + + fmt.Printf("\n======== end demo test ========\n") +} + +func drop_database(db *sql.DB, demodb string) { + st := time.Now().Nanosecond() + res, err := db.Exec("drop database " + demodb) + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + + fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func create_database(db *sql.DB, demodb string) { + st := time.Now().Nanosecond() + // create database + res, err := db.Exec("create database " + demodb) + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + + fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) + + return +} + +func use_database(db *sql.DB, demodb string) { + st := time.Now().Nanosecond() + // use database + res, err := db.Exec("use " + demodb) // notes: must no quote to db name + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + + fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func create_table(db *sql.DB, demot string) { + st := time.Now().Nanosecond() + // create table + res, err := db.Exec("create table " + demot + " (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)") + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func insert_data(db *sql.DB, demot string) { + st := time.Now().Nanosecond() + // insert data + res, err := db.Exec("insert into " + demot + + " values (now, 100, 'beijing', 10, true, 'one', 123.456, 123.456)" + + " (now+1s, 101, 'shanghai', 11, true, 'two', 789.123, 789.123)" + + " (now+2s, 102, 'shenzhen', 12, false, 'three', 456.789, 456.789)") + + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func select_data(db *sql.DB, demot string) { + st := time.Now().Nanosecond() + + rows, err := db.Query("select * from ? 
" , demot) // go text mode + checkErr(err) + + fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ","ts", " ", "id"," ", "name"," ","len", " ","flag"," ", "notes", " ", "fv", " ", " ", "dv") + var affectd int + for rows.Next() { + var ts string + var name string + var id int + var len int8 + var flag bool + var notes string + var fv float32 + var dv float64 + + err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv) + checkErr(err) + + fmt.Printf("%s\t", ts) + fmt.Printf("%d\t",id) + fmt.Printf("%10s\t",name) + fmt.Printf("%d\t",len) + fmt.Printf("%t\t",flag) + fmt.Printf("%s\t",notes) + fmt.Printf("%06.3f\t",fv) + fmt.Printf("%09.6f\n",dv) + + affectd++ + } + + et := time.Now().Nanosecond() + fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) + fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func drop_database_stmt(db *sql.DB,demodb string) { + st := time.Now().Nanosecond() + // drop test db + stmt, err := db.Prepare("drop database ?") + checkErr(err) + defer stmt.Close() + + res, err := stmt.Exec(demodb) + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func create_database_stmt(db *sql.DB,demodb string) { + st := time.Now().Nanosecond() + // create database + //var stmt interface{} + stmt, err := db.Prepare("create database ?") + checkErr(err) + + //var res driver.Result + res, err := stmt.Exec(demodb) + checkErr(err) + + //fmt.Printf("Query OK, %d row(s) affected()", res.RowsAffected()) + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func use_database_stmt (db *sql.DB,demodb string) { + st := time.Now().Nanosecond() + // create database + //var stmt interface{} + stmt, err := db.Prepare("use " + demodb) + checkErr(err) + + res, err := stmt.Exec() + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func create_table_stmt (db *sql.DB,demot string) { + st := time.Now().Nanosecond() + // create table + // (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double) + stmt, err := db.Prepare("create table ? (? timestamp, ? int, ? binary(8), ? tinyint, ? bool, ? binary(8), ? float, ? double)") + checkErr(err) + + res, err := stmt.Exec(demot, "ts", "id", "name", "len", "flag", "notes", "fv", "dv") + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func insert_data_stmt(db *sql.DB,demot string) { + st := time.Now().Nanosecond() + // insert data into table + stmt, err := db.Prepare("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?) 
(?, ?, ?, ?, ?, ?, ?, ?)") + checkErr(err) + + res, err := stmt.Exec(demot, "now" , 1000, "'haidian'" , 6, true, "'AI world'", 6987.654, 321.987, + "now+1s", 1001, "'changyang'" , 7, false, "'DeepMode'", 12356.456, 128634.456, + "now+2s", 1002, "'chuangping'" , 8, true, "'database'", 3879.456, 65433478.456,) + checkErr(err) + + affectd, err := res.RowsAffected() + checkErr(err) + + et := time.Now().Nanosecond() + fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func select_data_stmt(db *sql.DB, demot string) { + st := time.Now().Nanosecond() + + stmt, err := db.Prepare("select ?, ?, ?, ?, ?, ?, ?, ? from ?" ) // go binary mode + checkErr(err) + + rows, err := stmt.Query("ts", "id","name","len", "flag","notes", "fv", "dv", demot) + checkErr(err) + + fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %11s%s %14s%s\n", " ","ts", " ", "id"," ", "name"," ","len", " ","flag"," ", "notes", " ", "fv", " ", " ", "dv") + var affectd int + for rows.Next() { + var ts string + var name string + var id int + var len int8 + var flag bool + var notes string + var fv float32 + var dv float64 + + err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv) + //fmt.Println("start scan fields from row.rs, &fv:", &fv) + //err = rows.Scan(&fv) + checkErr(err) + + fmt.Printf("%s\t", ts) + fmt.Printf("%d\t",id) + fmt.Printf("%10s\t",name) + fmt.Printf("%d\t",len) + fmt.Printf("%t\t",flag) + fmt.Printf("%s\t",notes) + fmt.Printf("%06.3f\t",fv) + fmt.Printf("%09.6f\n",dv) + + affectd++ + + } + + et := time.Now().Nanosecond() + fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) +} + +func checkErr(err error) { + if err != nil { + panic(err) + } +} \ No newline at end of file diff --git a/tests/examples/matlab/TDengineDemo.m b/tests/examples/matlab/TDengineDemo.m new file mode 100644 index 000000000000..c02d45e8b9a4 --- /dev/null +++ b/tests/examples/matlab/TDengineDemo.m @@ -0,0 +1,128 @@ +%% Connect to TDengine +clear; +fprintf("Connecting to TDengine..."); +dbName = 'tsdb'; +user = 'root'; +password = 'taosdata'; +jdbcDriverName = 'com.taosdata.jdbc.TSDBDriver'; +jdbcUrl = 'jdbc:TSDB://192.168.1.113:0/'; +conn = database(dbName, user, password, jdbcDriverName, jdbcUrl) +if isempty(conn.Message) + fprintf("Connection is successfully established!\n"); +else + fprintf("Failed to connect to server: %s\n", conn.Message); +end + +%% Query a table in TDengine, and store the results in a MATLAB table object 'tb1' +% Please note that the select() function retrieves all rows in a table/supertale into MATLAB +sql = "select ts, distance1 from device1 limit 5"; +fprintf("Execute query: %s\n", sql); +tic +tb1 = select(conn, sql); +timeused = toc; +fprintf("\tQuery completed!\n\tNumber of rows retrieved: %d\n\tNumber of columns in each row: %d\n\tTime used: %g\n", height(tb1), width(tb1), timeused); + +% To go a bit further, we can convert the MATLAB table object to a MATLAB matrix object +data = table2array(tb1) + +%% Query table names in a TDengine supertable, and store the results in a MATLAB table object 'stbmeta' +sql = "select tbname from devices limit 10"; +fprintf("Execute query: %s\n", sql); +tic; +stbmeta = select(conn, sql); +timeused = toc; +fprintf("\tTables in supertable 'devices': %t", stbmeta); +fprintf("\tQuery completed!\n\tNumber of rows retrieved: %d\n\tNumber of columns in each row: %d\n\tTime used: %g\n", height(stbmeta), width(stbmeta), timeused); + +%% Query a TDengine supertable, and stores the results 
in a MATLAB table object 'stb' +sql = "select ts, distance1 from devices"; +fprintf("Execute query: %s\n", sql); +tic; +stb = select(conn, sql); +timeused = toc; +fprintf("\tQuery completed!\n\tNumber of rows retrieved: %d\n\tNumber of columns in each row: %d\n\tTime used: %g\n", height(stb), width(stb), timeused); + +%% Query TDengine using cursors and specify the number of rows to fetch +sql = 'select * from device1'; +rowLimit = 5; +fprintf("Execute query: %s with row limit set to %d\n", sql, rowLimit); +tic; +% Get cursor +cur = exec(conn, sql); +% Fetch data +cur = fetch(cur, rowLimit); +data = cur.Data +timeused = toc; +fprintf("\tQuery completed!\n\tNumber of rows retrieved: %d\n\tNumber of columns in each row: %d\n\tTime used: %g\n", size(data, 1), size(data, 2), timeused); + +%% Query specific columns in a TDenigine table 'device1', and stores the results directly in a MATLAB cell array 'data' +sql = 'SELECT * FROM device1 order by ts asc'; +fprintf("Execute query: %s\n", sql); +tic; +data = fetch(conn, sql); +timeused = toc; +fprintf("\tQuery completed!\n\tNumber of rows retrieved: %d\n\tNumber of columns in each row: %d\n\tTime used: %g\n", size(data, 1), size(data, 2), timeused); +% Let's now convert the cell array 'data' into some matrices, and make a plot of column 'c1' again the timestamp 'ts' +ts = cell2mat(data(:,1)); +c1 = cell2mat(data(:,2)); + +%% Query aggregation results from a table +% TDengine is so powerful at aggregated computations. Let's calculate the max, mean, standard deviation and min values for every 10 minutes in the +% tb1's timeline, and then plot them together with all the data points in tb1 +sql = sprintf('SELECT max(measure1), avg(measure1), stddev(measure1), min(measure1) FROM device1 WHERE ts >= %d and ts <= %d interval(10m)', ts(1), ts(end)); +fprintf("Execute query: %s\n", sql); +tic; +c1_stats = fetch(conn, sql); +timeused = toc; +fprintf("\tQuery completed!\n\tNumber of rows retrieved: %d\n\tNumber of columns in each row: %d\n\tTime used: %g\n", size(c1_stats, 1), size(c1_stats, 2), timeused); +% Prepare data for plotting. +tsAsDate = datestr(ts/86400000 + datenum(1970,1,1), 'mm-dd HH:MM'); +c1_stats = cell2mat(c1_stats); +c1_stats_ts = c1_stats(:, 1); +c1_stats_max = c1_stats(:, 2); +c1_stats_mean = c1_stats(:, 3); +c1_stats_stddev = c1_stats(:, 4); +c1_stats_min = c1_stats(:, 5); +c1_stats_tsAsDate = datestr(c1_stats(:,1)/86400000 + datenum(1970,1,1), 'mm-dd HH:MM'); + +%% Now let's plot the data and associated statistical aggregation calculation in a figure. 
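+% The plot commands below shift every aggregated timestamp by 300000 ms.
+% Assuming this constant is tied to the interval(10m) used in the aggregation
+% query above, it is simply half of a 10-minute window, so each statistic is
+% drawn at the centre of its window rather than at its start:
+halfWindowMs = 10 * 60 * 1000 / 2;   % = 300000, matches the offset used below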
+fh = figure(1); +set(fh,'position',[50 50 1300 700]); +h1 = scatter(ts, c1, 5, 'c'); +hold on; +h2 = plot(c1_stats_ts + 300000, c1_stats_max, '-or', 'linewidth', 1); +hold on; +h3 = plot(c1_stats_ts + 300000, c1_stats_mean, '-xg', 'linewidth', 1); +hold on; +h4 = plot(c1_stats_ts + 300000, c1_stats_stddev, '-*y', 'linewidth', 1); +hold on; +h5 = plot(c1_stats_ts + 300000, c1_stats_min, '-+k', 'linewidth', 1); +xlabel('time'); +ylabel('measurement1'); +set(gca, 'xtick',[ts(1),ts(end/4),ts(2*end/4),ts(3*end/4),ts(end)]); +set(gca, 'xticklabel',{tsAsDate(1,:), tsAsDate(end/4,:),tsAsDate(2*end/4,:),tsAsDate(3*end/4,:),tsAsDate(end,:)}); +xlim([ts(1), ts(end)]); +legend([h1, h2, h3, h4, h5], 'data points', 'max per 10 mins', 'mean per 10 mins', 'stddev per 10 mins', 'min per 10 mins'); +title('Device Measurement Monitoring Demo'); +grid on; + +%% Insert data into TDengine using exec() +sql = 'insert into device1 (ts, distance1) values (now, -1)'; +fprintf("Execute query: %s\n", sql); +cur = exec(conn, sql) +sql = 'select * from device1 limit 1'; +fprintf("Execute query: %s\n", sql); +data = select(conn, sql) +conn.close; + +%% Insert data into TDengine using datainsert() +% this is currently not supported + +% colnames = {'ts','c1','c2','c3'}; +% dat = {'now' 99 99 99}; +% tbname = 'plane1'; +% datainsert(conn, tbname, colnames, dat); +% cur = exec(conn, 'select * from ' + tbname); +% cur = fetch(cur, 5); +% data = cur.Data + diff --git a/tests/examples/python/read_example.py b/tests/examples/python/read_example.py new file mode 100644 index 000000000000..3276f296a27c --- /dev/null +++ b/tests/examples/python/read_example.py @@ -0,0 +1,83 @@ +""" +This is the sample code for TDengine python2 client. +""" +import taos +import sys +import datetime +import random + +def exitProgram(conn): + conn.close() + sys.exit() + +if __name__ == '__main__': + start_time = datetime.datetime(2019, 7, 1) + time_interval = datetime.timedelta(seconds=60) + + # Connect to TDengine server. 
+ # + # parameters: + # @host : TDengine server IP address + # @user : Username used to connect to TDengine server + # @password : Password + # @database : Database to use when connecting to TDengine server + # @config : Configuration directory + conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos") + + # Generate a cursor object to run SQL commands + c1 = conn.cursor() + + # Create a database named db + try: + c1.execute('create database db') + except Exception as err: + conn.close() + raise(err) + + # use database + try: + c1.execute('use db') + except Exception as err: + conn.close() + raise(err) + + + # create table + try: + c1.execute('create table if not exists t (ts timestamp, a int, b float, c binary(20))') + except Exception as err: + conn.close() + raise(err) + + # insert data + for i in range(10000): + try: + c1.execute("insert into t values ('%s', %d, %f, '%s')" % (start_time, random.randint(1,10), random.randint(1,10)/10.0, 'hello')) + except Exception as err: + conn.close() + raise(err) + start_time += time_interval + + # query data and return data in the form of list + try: + c1.execute('select * from db.t') + except Exception as err: + conn.close() + raise(err) + + # Column names are in c1.description list + cols = c1.description + # Use fetchall to fetch data in a list + data = c1.fetchall() + + try: + c1.execute('select * from db.t') + except Exception as err: + conn.close() + raise(err) + + # Use iterator to go through the retreived data + for col in c1: + print(col) + + conn.close()
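+# A possible extension (sketch): pair the column names from c1.description with
+# each row returned by fetchall(), assuming each description entry starts with
+# the column name (DB-API style). This would need to run before conn.close():
+#
+#     col_names = [col[0] for col in cols]
+#     for row in data:
+#         print(dict(zip(col_names, row)))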